// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};
/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->funcs = &drm_gem_shmem_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	/*
	 * Our buffers are kept pinned, so allocating them
	 * from the MOVABLE zone is a really bad idea, and
	 * conflicts with CMA. See comments above new_inode()
	 * why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
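/*
 * Example: allocating a buffer from driver code. This is an illustrative
 * sketch, not part of the helper library; the surrounding driver context
 * and the 4 MiB size are assumptions.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, SZ_4M);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	... use shmem->base like any other GEM object ...
 *
 *	drm_gem_object_put_unlocked(&shmem->base);
 */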
/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		shmem->pages_use_count--;
		drm_prime_gem_destroy(obj, shmem->sgt);
		kvfree(shmem->pages);
	} else {
		if (shmem->sgt) {
			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
				     shmem->sgt->nents, DMA_BIDIRECTIONAL);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}
/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);
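/*
 * Example: a driver pinning the backing pages around a software operation
 * and releasing them afterwards. A sketch only; "my_flush_pages" is a
 * hypothetical driver function, not part of these helpers.
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *
 *	my_flush_pages(shmem->pages, shmem->base.size >> PAGE_SHIFT);
 *
 *	drm_gem_shmem_put_pages(shmem);
 */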
static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		goto err_zero_use;

	if (obj->import_attach)
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	else
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}
/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a virtual address exists for the buffer backing
 * the shmem GEM object.
 *
 * Returns:
 * The kernel virtual address of the mapping on success or an ERR_PTR()-encoded
 * negative error code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	void *vaddr;
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);
	vaddr = drm_gem_shmem_vmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
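/*
 * Example: CPU access through a temporary kernel mapping. A sketch; the
 * memset() pattern and error handling are illustrative only.
 *
 *	void *vaddr;
 *
 *	vaddr = drm_gem_shmem_vmap(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, obj->size);
 *
 *	drm_gem_shmem_vunmap(obj, vaddr);
 */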
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
	else
		vunmap(shmem->vaddr);

	shmem->vaddr = NULL;
	drm_gem_shmem_put_pages(shmem);
}
/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @vaddr: Virtual address of the mapping
 *
 * This function removes the virtual address when the use count drops to zero.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
/**
 * drm_gem_shmem_create_with_handle - Create a shmem GEM object and return a GEM handle to it
 * @file_priv: DRM file structure to register the handle for
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @handle: Return location for the GEM handle
 *
 * This function creates a shmem GEM object and registers a handle for it in
 * @file_priv. The reference taken at creation is dropped before returning, so
 * the handle holds the only reference.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 u32 *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
/* Update madvise status, returns true if the object has not been purged,
 * else false.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
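/*
 * Example: a driver madvise ioctl marking a buffer purgeable. A sketch in
 * the style of drivers with a userspace madvise interface; "my_dev",
 * "shrinker_lock", "shrinker_list" and MY_MADV_DONTNEED (a nonzero value,
 * since drm_gem_shmem_is_purgeable() treats madv > 0 as purgeable) are all
 * hypothetical driver state, not part of these helpers.
 *
 *	mutex_lock(&my_dev->shrinker_lock);
 *	args->retained = drm_gem_shmem_madvise(obj, args->madv);
 *	if (args->retained) {
 *		struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 *
 *		if (args->madv == MY_MADV_DONTNEED)
 *			list_add_tail(&shmem->madv_list, &my_dev->shrinker_list);
 *		else
 *			list_del_init(&shmem->madv_list);
 *	}
 *	mutex_unlock(&my_dev->shrinker_lock);
 */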
void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
		     shmem->sgt->nents, DMA_BIDIRECTIONAL);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
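/*
 * Example: purging objects from a driver shrinker scan callback. A sketch
 * of how a driver might walk a list of madvise'd objects; "my_dev" and
 * "shrinker_list" are hypothetical, and list maintenance on purge is left
 * to the driver.
 *
 *	unsigned long freed = 0;
 *
 *	list_for_each_entry(shmem, &my_dev->shrinker_list, madv_list) {
 *		if (freed >= sc->nr_to_scan)
 *			break;
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(&shmem->base))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */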
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
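/*
 * Example: hooking the helper into a driver. A sketch of the relevant
 * &struct drm_driver field only; "my_driver" and any omitted fields are
 * hypothetical.
 *
 *	static struct drm_driver my_driver = {
 *		...
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *		...
 *	};
 */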
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}
static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}
static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
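/*
 * Example: drivers don't call this directly; the GEM core dispatches to it
 * through &drm_gem_object_funcs.mmap (see drm_gem_shmem_funcs above) when
 * the generic drm_gem_mmap() path is used. A sketch of a driver's fops,
 * assuming the DEFINE_DRM_GEM_FOPS() convenience macro; "my_driver" is
 * hypothetical.
 *
 *	DEFINE_DRM_GEM_FOPS(my_driver_fops);
 *
 *	static struct drm_driver my_driver = {
 *		...
 *		.fops = &my_driver_fops,
 *	};
 */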
/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);
/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;
	int ret;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

	shmem->sgt = sgt;

	return sgt;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
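/*
 * Example: fetching a device-mapped sg table before pointing hardware at
 * the buffer. A sketch; "my_hw_bind" and "my_dev" are hypothetical driver
 * pieces, not part of these helpers.
 *
 *	struct sg_table *sgt;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(obj);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	my_hw_bind(my_dev, sgt->sgl, sgt->nents);
 */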
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	size_t npages = size >> PAGE_SHIFT;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!shmem->pages) {
		ret = -ENOMEM;
		goto err_free_gem;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
	if (ret < 0)
		goto err_free_array;

	shmem->sgt = sgt;
	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;

err_free_array:
	kvfree(shmem->pages);
err_free_gem:
	drm_gem_object_put_unlocked(&shmem->base);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
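/*
 * Example: wiring PRIME import into a driver. A sketch of the relevant
 * &struct drm_driver fields; "my_driver" is hypothetical, and the other
 * callbacks shown are the standard core PRIME helpers.
 *
 *	static struct drm_driver my_driver = {
 *		...
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *		.gem_prime_mmap		= drm_gem_prime_mmap,
 *		...
 *	};
 */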