/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
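
/*
 * Illustrative sketch (not part of this file): how a hypothetical driver
 * might pair drm_gem_object_init() with a driver-private BO type. The
 * "foo_bo" struct and "foo_bo_create" function are made-up names; only the
 * drm_gem_* and kernel allocator calls are real.
 *
 *      struct foo_bo {
 *              struct drm_gem_object base;
 *      };
 *
 *      static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *      {
 *              struct foo_bo *bo;
 *              int ret;
 *
 *              bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *              if (!bo)
 *                      return ERR_PTR(-ENOMEM);
 *
 *              // Attaches shmem backing; size must be page-aligned.
 *              ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *              if (ret) {
 *                      kfree(bo);
 *                      return ERR_PTR(ret);
 *              }
 *
 *              return bo;
 *      }
 */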
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        dma_resv_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;

        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
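
/*
 * Illustrative sketch (not part of this file): drm_gem_private_object_init()
 * is the variant for objects whose storage the driver provides itself, e.g.
 * buffers imported through PRIME. The import helper and "foo_bo" below are
 * hypothetical.
 *
 *      struct drm_gem_object *
 *      foo_gem_prime_import_sg_table(struct drm_device *dev,
 *                                    struct dma_buf_attachment *attach,
 *                                    struct sg_table *sgt)
 *      {
 *              struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *
 *              if (!bo)
 *                      return ERR_PTR(-ENOMEM);
 *
 *              // No shmem file is created; the dma-buf provides the pages.
 *              // Align the size so the BUG_ON() above cannot trigger.
 *              drm_gem_private_object_init(dev, &bo->base,
 *                                          PAGE_ALIGN(attach->dmabuf->size));
 *              bo->sgt = sgt;
 *              return &bo->base;
 *      }
 */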
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (obj->funcs && obj->funcs->close)
                obj->funcs->close(obj, file_priv);
        else if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (obj->import_attach) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
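
/*
 * Illustrative sketch (not part of this file): drivers that use GEM for
 * their dumb buffers typically wire these helpers straight into their
 * &struct drm_driver; "foo_dumb_create" is a hypothetical driver callback.
 *
 *      static struct drm_driver foo_driver = {
 *              ...
 *              .dumb_create     = foo_dumb_create,
 *              .dumb_map_offset = drm_gem_dumb_map_offset,
 *              .dumb_destroy    = drm_gem_dumb_destroy,
 *      };
 */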
/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
/**
 * drm_gem_handle_create_tail - internal functions to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs && obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        } else if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
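
/*
 * Illustrative sketch (not part of this file): the "publish last" pattern
 * described above, in a hypothetical dumb_create callback reusing the
 * made-up foo_bo_create() from earlier.
 *
 *      static int foo_dumb_create(struct drm_file *file_priv,
 *                                 struct drm_device *dev,
 *                                 struct drm_mode_create_dumb *args)
 *      {
 *              struct foo_bo *bo = foo_bo_create(dev, args->size);
 *              int ret;
 *
 *              if (IS_ERR(bo))
 *                      return PTR_ERR(bo);
 *
 *              // The object is fully set up, so it may now become
 *              // visible to userspace.
 *              ret = drm_gem_handle_create(file_priv, &bo->base,
 *                                          &args->handle);
 *              // The handle holds its own reference; drop the creation
 *              // reference whether or not handle creation succeeded.
 *              drm_gem_object_put_unlocked(&bo->base);
 *
 *              return ret;
 *      }
 */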
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
        cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        struct pagevec pvec;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        pagevec_init(&pvec);
        while (i--) {
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
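
/*
 * Illustrative sketch (not part of this file): drm_gem_get_pages() and
 * drm_gem_put_pages() are meant to be used as a pair around whatever the
 * driver does with the backing pages; the foo_bo pin helpers below are
 * hypothetical.
 *
 *      static int foo_bo_pin_pages(struct foo_bo *bo)
 *      {
 *              struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *              if (IS_ERR(pages))
 *                      return PTR_ERR(pages);
 *
 *              bo->pages = pages;      // pinned until drm_gem_put_pages()
 *              return 0;
 *      }
 *
 *      static void foo_bo_unpin_pages(struct foo_bo *bo)
 *      {
 *              drm_gem_put_pages(&bo->base, bo->pages, true, true);
 *              bo->pages = NULL;
 *      }
 */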
/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct pagevec pvec;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pagevec_init(&pvec);
        for (i = 0; i < npages; i++) {
                if (!pages[i])
                        continue;

                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
                          struct drm_gem_object **objs)
{
        int i, ret = 0;
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        for (i = 0; i < count; i++) {
                /* Check if we currently have a reference on the object */
                obj = idr_find(&filp->object_idr, handle[i]);
                if (!obj) {
                        ret = -ENOENT;
                        break;
                }

                drm_gem_object_get(obj);
                objs[i] = obj;
        }
        spin_unlock(&filp->table_lock);

        return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out)
{
        int ret;
        u32 *handles;
        struct drm_gem_object **objs;

        if (!count)
                return 0;

        objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
                              GFP_KERNEL | __GFP_ZERO);
        if (!objs)
                return -ENOMEM;

        handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                goto out;
        }

        if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in GEM handles\n");
                goto out;
        }

        ret = objects_lookup(filp, handles, count, objs);
        *objs_out = objs;

out:
        kvfree(handles);
        return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
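
/*
 * Illustrative sketch (not part of this file): a hypothetical submit ioctl
 * resolving its handle array with the helper above. On a partial lookup
 * failure @objs_out is still assigned, so the cleanup loop must cope with
 * NULL entries (the array is __GFP_ZERO'd).
 *
 *      ret = drm_gem_objects_lookup(file_priv,
 *                                   u64_to_user_ptr(args->bo_handles),
 *                                   args->bo_count, &bos);
 *      ...
 *      for (i = 0; i < args->bo_count; i++)
 *              if (bos[i])
 *                      drm_gem_object_put_unlocked(bos[i]);
 *      kvfree(bos);
 */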
/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj = NULL;

        objects_lookup(filp, &handle, 1, &obj);
        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns 0 on success, -ETIME if the wait timed out, or -ERESTARTSYS if
 * the wait was interrupted.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                           bool wait_all, unsigned long timeout)
{
        long ret;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(filep, handle);
        if (!obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
                return -EINVAL;
        }

        ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
                                        true, timeout);
        if (ret == 0)
                ret = -ETIME;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
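
/*
 * Illustrative sketch (not part of this file): a hypothetical wait-BO ioctl
 * built on the helper above; "foo_wait" and its args struct are made up,
 * while drm_timeout_abs_to_jiffies() is the stock DRM conversion helper.
 *
 *      static int foo_wait_ioctl(struct drm_device *dev, void *data,
 *                                struct drm_file *file_priv)
 *      {
 *              struct foo_wait *args = data;
 *              unsigned long timeout =
 *                      drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *              return drm_gem_dma_resv_wait(file_priv, args->handle,
 *                                           true, timeout);
 *      }
 */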
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}
/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put_unlocked(obj);
        return ret;
}
/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_put_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}
/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init_base(&file_private->object_idr, 1);
        spin_lock_init(&file_private->table_lock);
}
/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}
/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                fput(obj->filp);

        dma_resv_fini(&obj->_resv);
        drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        if (obj->funcs) {
                obj->funcs->free(obj);
        } else if (dev->driver->gem_free_object_unlocked) {
                dev->driver->gem_free_object_unlocked(obj);
        } else if (dev->driver->gem_free_object) {
                WARN_ON(!mutex_is_locked(&dev->struct_mutex));

                dev->driver->gem_free_object(obj);
        }
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev;

        if (!obj)
                return;

        dev = obj->dev;

        if (dev->driver->gem_free_object) {
                might_lock(&dev->struct_mutex);
                if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
                                   &dev->struct_mutex))
                        mutex_unlock(&dev->struct_mutex);
        } else {
                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
        if (obj) {
                WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put);
/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;
        int ret;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_get(obj);

        if (obj->funcs && obj->funcs->mmap) {
                ret = obj->funcs->mmap(obj, vma);
                if (ret) {
                        drm_gem_object_put_unlocked(obj);
                        return ret;
                }
                WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
        } else {
                if (obj->funcs && obj->funcs->vm_ops)
                        vma->vm_ops = obj->funcs->vm_ops;
                else if (dev->driver->gem_vm_ops)
                        vma->vm_ops = dev->driver->gem_vm_ops;
                else {
                        drm_gem_object_put_unlocked(obj);
                        return -EINVAL;
                }

                vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        }

        vma->vm_private_data = obj;

        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put_unlocked(obj);
                return -EACCES;
        }

        if (node->readonly) {
                if (vma->vm_flags & VM_WRITE) {
                        drm_gem_object_put_unlocked(obj);
                        return -EINVAL;
                }

                vma->vm_flags &= ~VM_MAYWRITE;
        }

        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
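
/*
 * Illustrative sketch (not part of this file): drm_gem_mmap() is meant to
 * be plugged into the driver's &file_operations as the mmap handler; the
 * surrounding fops shown here are the standard DRM entry points.
 *
 *      static const struct file_operations foo_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = drm_open,
 *              .release        = drm_release,
 *              .unlocked_ioctl = drm_ioctl,
 *              .mmap           = drm_gem_mmap,
 *              .poll           = drm_poll,
 *              .read           = drm_read,
 *      };
 */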
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
                        const struct drm_gem_object *obj)
{
        drm_printf_indent(p, indent, "name=%d\n", obj->name);
        drm_printf_indent(p, indent, "refcount=%u\n",
                          kref_read(&obj->refcount));
        drm_printf_indent(p, indent, "start=%08lx\n",
                          drm_vma_node_start(&obj->vma_node));
        drm_printf_indent(p, indent, "size=%zu\n", obj->size);
        drm_printf_indent(p, indent, "imported=%s\n",
                          obj->import_attach ? "yes" : "no");

        if (obj->funcs && obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
        else if (obj->dev->driver->gem_print_info)
                obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->pin)
                return obj->funcs->pin(obj);
        else if (obj->dev->driver->gem_prime_pin)
                return obj->dev->driver->gem_prime_pin(obj);
        else
                return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->unpin)
                obj->funcs->unpin(obj);
        else if (obj->dev->driver->gem_prime_unpin)
                obj->dev->driver->gem_prime_unpin(obj);
}
void *drm_gem_vmap(struct drm_gem_object *obj)
{
        void *vaddr;

        if (obj->funcs && obj->funcs->vmap)
                vaddr = obj->funcs->vmap(obj);
        else if (obj->dev->driver->gem_prime_vmap)
                vaddr = obj->dev->driver->gem_prime_vmap(obj);
        else
                vaddr = ERR_PTR(-EOPNOTSUPP);

        if (!vaddr)
                vaddr = ERR_PTR(-ENOMEM);

        return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        if (!vaddr)
                return;

        if (obj->funcs && obj->funcs->vunmap)
                obj->funcs->vunmap(obj, vaddr);
        else if (obj->dev->driver->gem_prime_vunmap)
                obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
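
/*
 * Illustrative sketch (not part of this file): drm_gem_vmap() returns an
 * ERR_PTR-encoded error rather than NULL, so callers check it like this
 * (hypothetical CPU-copy helper):
 *
 *      void *vaddr = drm_gem_vmap(obj);
 *
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *
 *      memcpy(vaddr, data, len);
 *      drm_gem_vunmap(obj, vaddr);
 */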
/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
                          struct ww_acquire_ctx *acquire_ctx)
{
        int contended = -1;
        int i, ret;

        ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
        if (contended != -1) {
                struct drm_gem_object *obj = objs[contended];

                ret = dma_resv_lock_slow_interruptible(obj->resv,
                                                       acquire_ctx);
                if (ret) {
                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        for (i = 0; i < count; i++) {
                if (i == contended)
                        continue;

                ret = dma_resv_lock_interruptible(objs[i]->resv,
                                                  acquire_ctx);
                if (ret) {
                        int j;

                        for (j = 0; j < i; j++)
                                dma_resv_unlock(objs[j]->resv);

                        if (contended != -1 && contended >= i)
                                dma_resv_unlock(objs[contended]->resv);

                        if (ret == -EDEADLK) {
                                contended = i;
                                goto retry;
                        }

                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);

        return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
                            struct ww_acquire_ctx *acquire_ctx)
{
        int i;

        for (i = 0; i < count; i++)
                dma_resv_unlock(objs[i]->resv);

        ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
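
/*
 * Illustrative sketch (not part of this file): the lock/submit/unlock flow
 * described above, as it might appear in a hypothetical job-submission
 * path; foo_job_queue() is made up.
 *
 *      struct ww_acquire_ctx ctx;
 *      int ret;
 *
 *      ret = drm_gem_lock_reservations(bos, bo_count, &ctx);
 *      if (ret)
 *              return ret;
 *
 *      // All reservations held: queue the job and attach its fences.
 *      ret = foo_job_queue(job);
 *
 *      drm_gem_unlock_reservations(bos, bo_count, &ctx);
 *      return ret;
 */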
/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
                            struct dma_fence *fence)
{
        struct dma_fence *entry;
        unsigned long index;
        u32 id = 0;
        int ret;

        if (!fence)
                return 0;

        /* Deduplicate if we already depend on a fence from the same context.
         * This lets the size of the array of deps scale with the number of
         * engines involved, rather than the number of BOs.
         */
        xa_for_each(fence_array, index, entry) {
                if (entry->context != fence->context)
                        continue;

                if (dma_fence_is_later(fence, entry)) {
                        dma_fence_put(entry);
                        xa_store(fence_array, index, fence, GFP_KERNEL);
                } else {
                        dma_fence_put(fence);
                }
                return 0;
        }

        ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
        if (ret != 0)
                dma_fence_put(fence);

        return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);
/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
                                     struct drm_gem_object *obj,
                                     bool write)
{
        int ret;
        struct dma_fence **fences;
        unsigned int i, fence_count;

        if (!write) {
                struct dma_fence *fence =
                        dma_resv_get_excl_rcu(obj->resv);

                return drm_gem_fence_array_add(fence_array, fence);
        }

        ret = dma_resv_get_fences_rcu(obj->resv, NULL,
                                      &fence_count, &fences);
        if (ret || !fence_count)
                return ret;

        for (i = 0; i < fence_count; i++) {
                ret = drm_gem_fence_array_add(fence_array, fences[i]);
                if (ret)
                        break;
        }

        for (; i < fence_count; i++)
                dma_fence_put(fences[i]);
        kfree(fences);
        return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
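
/*
 * Illustrative sketch (not part of this file): collecting implicit
 * dependencies for a job. The xarray must be initialized with
 * XA_FLAGS_ALLOC so the xa_alloc() in drm_gem_fence_array_add() can
 * work; "job->deps" and FOO_WRITE are hypothetical.
 *
 *      xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 *
 *      for (i = 0; i < bo_count; i++) {
 *              ret = drm_gem_fence_array_add_implicit(&job->deps, bos[i],
 *                                                     args->flags & FOO_WRITE);
 *              if (ret)
 *                      goto err_free_deps;
 *      }
 */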