/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mm_fault time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
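
/*
 * Worked example (assuming the common PAGE_SHIFT of 12, i.e. 4 KiB pages):
 * on 64-bit, 0xFFFFFFFFUL >> PAGE_SHIFT is 0xFFFFF, so fake offsets start at
 * page 0x100000 -- the first page above the 4 GiB boundary -- and cover 16
 * times that range.  On 32-bit the same scheme starts just above the 256 MiB
 * boundary (0xFFFFFFFUL >> PAGE_SHIFT is 0xFFFF), keeping the faked pgoff
 * within an unsigned long.
 */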

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
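
/*
 * Usage sketch (hypothetical "foo" driver, not part of this file): drivers
 * normally embed struct drm_gem_object in their own buffer type and round
 * the size up to whole pages before initializing it:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */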

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
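
/*
 * Usage sketch (hypothetical driver code): private objects are used when the
 * backing store does not come from shmfs, e.g. when wrapping pages imported
 * from a dma-buf:
 *
 *	drm_gem_private_object_init(dev, &bo->base, dma_buf->size);
 *	bo->sgt = sgt;
 *
 * where bo->sgt (hypothetical) holds the sg table obtained from the dma-buf
 * attachment.
 */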

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
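
/*
 * Usage sketch: GEM-based drivers plug this helper straight into their
 * struct drm_driver (foo_dumb_create is a hypothetical driver callback):
 *
 *	.dumb_create = foo_dumb_create,
 *	.dumb_destroy = drm_gem_dumb_destroy,
 */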

/**
 * drm_gem_handle_create_tail - internal functions to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
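
/*
 * Usage sketch (hypothetical driver code, e.g. a dumb_create implementation):
 * create the userspace handle and then drop the allocation reference; the
 * handle reference keeps the object alive from here on:
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *	drm_gem_object_unreference_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 */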

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
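
/*
 * Usage sketch (hypothetical driver code): a driver's map ioctl allocates
 * the fake offset and returns it so userspace can pass it to mmap(2):
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 */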

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB.  If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue.  But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
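
/*
 * Usage sketch (hypothetical driver code): pin the backing pages while the
 * buffer is in use and release them when done; passing 0 relies on the
 * mapping's default gfp mask, which drm_gem_get_pages() ORs in:
 *
 *	struct page **pages = drm_gem_get_pages(obj, 0);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	// ... build an sg table, map into the device, etc. ...
 *	drm_gem_put_pages(obj, pages, true, false);
 */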

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
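
/*
 * Usage sketch (hypothetical ioctl code): take a lookup reference, operate
 * on the object, then drop the reference again:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	// ... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */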

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
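
/*
 * Userspace flow sketch (illustrative, not part of this file): one process
 * publishes a global name with DRM_IOCTL_GEM_FLINK, and a second process
 * turns that name back into a local handle with DRM_IOCTL_GEM_OPEN:
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// flink.name can now be shared with the other process
 *
 *	struct drm_gem_open open_args = { .name = name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_args);
 *	// open_args.handle and open_args.size are filled in
 */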

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
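
/*
 * Usage sketch: drivers wire these open/close helpers into their gem_vm_ops
 * next to a driver-specific fault handler (foo_gem_fault is hypothetical):
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */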

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
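
/*
 * Usage sketch (hypothetical driver code): a dma-buf mmap callback for an
 * exported GEM object can reuse this helper, taking struct_mutex as the
 * NOTE above requires:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *	mutex_unlock(&dev->struct_mutex);
 */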

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
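
/*
 * Usage sketch: drivers route mmap(2) on the DRM file descriptor to this
 * helper through their file_operations (abridged, hypothetical "foo" driver):
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *	};
 */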