// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	shmem = to_drm_gem_shmem_obj(obj);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
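/*
 * Example (illustrative sketch, not part of the helper API): a driver
 * allocating a buffer, with the ERR_PTR()-style error handling the
 * kernel-doc above describes. The function name is hypothetical.
 */
static int __maybe_unused example_create_bo(struct drm_device *dev, size_t size,
					    struct drm_gem_shmem_object **out)
{
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	*out = shmem; /* shmem->base is the embedded struct drm_gem_object */
	return 0;
}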
/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}
/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);
static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
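/*
 * Example (sketch): get/put must stay balanced. A driver needing the pinned
 * page array for a short operation would bracket it like this; the function
 * name is hypothetical.
 */
static int __maybe_unused example_with_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	/* ... shmem->pages[] is valid and pinned here ... */

	drm_gem_shmem_put_pages(shmem);

	return 0;
}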
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
 * @obj: GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
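/*
 * Example (sketch): direct driver use of the vmap/vunmap pair, which works
 * the same for imported and natively allocated objects. The function name
 * is hypothetical.
 */
static int __maybe_unused example_clear_bo(struct drm_gem_object *obj)
{
	struct dma_buf_map map;
	int ret;

	ret = drm_gem_shmem_vmap(obj, &map);
	if (ret)
		return ret;

	/* shmem buffers live in system memory, so map.vaddr is usable here */
	memset(map.vaddr, 0, obj->size);

	drm_gem_shmem_vunmap(obj, &map);

	return 0;
}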
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle carries the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
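/*
 * Example (sketch): a hypothetical driver madvise ioctl simply forwards the
 * userspace hint; a false return means the buffer was already purged. The
 * error mapping here is an assumption, not established driver policy.
 */
static int __maybe_unused example_madvise(struct drm_gem_object *obj, int madv)
{
	if (!drm_gem_shmem_madvise(obj, madv))
		return -ENOENT;

	return 0;
}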
void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
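/*
 * Example (sketch): a driver shrinker scanning its madv_list might try to
 * reclaim purgeable buffers like this; the trylock inside
 * drm_gem_shmem_purge() keeps it safe in reclaim context. The helper name
 * is hypothetical.
 */
static bool __maybe_unused example_try_reclaim(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!drm_gem_shmem_is_purgeable(shmem))
		return false;

	return drm_gem_shmem_purge(obj);
}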
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
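/*
 * Example (sketch): hardware with pitch restrictions can fix up the
 * userspace-provided fields before calling into the helper, as the
 * kernel-doc above describes. The 64-byte row alignment is a made-up
 * constraint and the function name is hypothetical.
 */
static int __maybe_unused example_dumb_create(struct drm_file *file,
					      struct drm_device *dev,
					      struct drm_mode_create_dumb *args)
{
	/* hypothetical constraint: rows must start on 64-byte boundaries */
	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
	args->size = args->pitch * args->height;

	return drm_gem_shmem_dumb_create(file, dev, args);
}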
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}
static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
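/*
 * Example (sketch): drm_gem_shmem_mmap() is reached through the GEM file
 * mmap path, so a driver usually just uses the stock GEM file operations;
 * the name is hypothetical.
 */
DEFINE_DRM_GEM_FOPS(example_fops);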
/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);
/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly, instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers who need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
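/*
 * Example (sketch): a driver binding a buffer for device access fetches the
 * dma-mapped table once and programs it into its MMU or DMA engine; the
 * function name is hypothetical and the hardware programming is elided.
 */
static int __maybe_unused example_map_to_device(struct drm_gem_object *obj)
{
	struct sg_table *sgt;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* ... program sgt's DMA addresses into the device here ... */

	return 0;
}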
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
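/*
 * Example (sketch): a minimal driver struct wiring up the shmem helpers as
 * the kernel-doc above suggests; the names are hypothetical and most
 * required fields are omitted for brevity.
 */
static const struct drm_driver example_driver __maybe_unused = {
	.driver_features = DRIVER_GEM,
	.dumb_create = drm_gem_shmem_dumb_create,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.name = "example",
};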