/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"
static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case 1:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;

        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;

        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}
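
/* reject any flags outside of the EXYNOS_BO_MASK set supported by this driver. */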
static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
                                 struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cacheable by default. */
        if (obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        return roundup(size, PAGE_SIZE);
}
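
/*
 * walk the buffer's scatterlist to find the page backing @page_offset and
 * insert its pfn at the faulting user address.
 */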
static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
                                  struct vm_area_struct *vma,
                                  unsigned long f_vaddr,
                                  pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        unsigned long pfn;
        int i;

        if (page_offset >= (buf->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                return -EINVAL;
        }

        /* find the scatterlist entry that contains page_offset. */
        for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
                if (page_offset < (sgl->length >> PAGE_SHIFT))
                        break;
                page_offset -= (sgl->length >> PAGE_SHIFT);
        }

        pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id in the idr table where the obj is registered
         * and return it through handle so that userspace can see it.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}
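
/*
 * tear down a GEM object: free the backing buffer (unless it was imported
 * through dma-buf), drop its mmap offset and release the object itself.
 */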
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;
        struct exynos_drm_gem_buf *buf;

        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;

        DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

        /*
         * do not release the memory region from the exporter.
         *
         * the region will be released by the exporter
         * once the dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
                goto out;

        exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;

        drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                      unsigned int gem_handle,
                                      struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem_obj->buffer->size;
}
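
/* allocate the driver-private GEM wrapper and initialize the base GEM object. */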
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                               unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj)
                return NULL;

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

        return exynos_gem_obj;
}
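
/*
 * create a GEM object of @size bytes together with its backing buffer,
 * honouring the memory type and cache attribute given in @flags.
 */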
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                 unsigned int flags,
                                                 unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        ret = exynos_drm_alloc_buf(dev, buf, flags);
        if (ret < 0)
                goto err_gem_fini;

        return exynos_gem_obj;

err_gem_fini:
        drm_gem_object_release(&exynos_gem_obj->base);
        kfree(exynos_gem_obj);
err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}
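
/* ioctl handler for DRM_IOCTL_EXYNOS_GEM_CREATE: allocate a buffer and return a handle to it. */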
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                 unsigned int gem_handle,
                                 struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        /*
         * decrease obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}
static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
                                                 struct file *filp)
{
        struct drm_file *file_priv;

        /* find the current process's drm_file from the filelist. */
        list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
                if (file_priv->filp == filp)
                        return file_priv;

        return ERR_PTR(-EFAULT);
}
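
/*
 * mmap callback used while filp->f_op is temporarily swapped to
 * exynos_drm_gem_fops by exynos_drm_gem_mmap_ioctl(); it maps the whole
 * buffer and restores filp->f_op and filp->private_data.
 */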
static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
        struct drm_file *file_priv;
        unsigned long vm_size;
        int ret;

        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_ops = drm_dev->driver->gem_vm_ops;

        /* restore it to the driver's fops. */
        filp->f_op = fops_get(drm_dev->driver->fops);

        file_priv = exynos_drm_find_drm_file(drm_dev, filp);
        if (IS_ERR(file_priv))
                return PTR_ERR(file_priv);

        /* restore it to the drm_file. */
        filp->private_data = file_priv;

        update_vm_cache_attr(exynos_gem_obj, vma);

        vm_size = vma->vm_end - vma->vm_start;

        /*
         * the buffer describes physically contiguous memory allocated
         * by user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if the user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
                                buffer->dma_addr, buffer->size,
                                &buffer->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        /*
         * take a reference to this mapping of the object. It is
         * unreferenced by the corresponding vm_close call.
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(drm_dev, vma);

        return 0;
}
static const struct file_operations exynos_drm_gem_fops = {
        .mmap = exynos_drm_gem_mmap_buffer,
};
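
/*
 * ioctl handler for DRM_IOCTL_EXYNOS_GEM_MMAP: map the buffer into the
 * calling process through vm_mmap() using the temporary f_op swap below.
 */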
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        /*
         * We have to use the gem object and its fops for the specific mmaper,
         * but vm_mmap() can deliver only filp. So we have to change
         * filp->f_op and filp->private_data temporarily, then restore them.
         * It is important to hold the lock until the settings are restored
         * to prevent others from misusing filp->f_op or filp->private_data.
         */
        mutex_lock(&dev->struct_mutex);

        /*
         * Set the specific mapper's fops; it will be restored by
         * exynos_drm_gem_mmap_buffer to dev->driver->fops.
         * This is used to call the specific mapper temporarily.
         */
        file_priv->filp->f_op = &exynos_drm_gem_fops;

        /*
         * Set the gem object to private_data so that the specific mapper
         * can get the gem object; it will be restored by
         * exynos_drm_gem_mmap_buffer to the drm_file.
         */
        file_priv->filp->private_data = obj;

        addr = vm_mmap(file_priv->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);

        drm_gem_object_unreference(obj);

        if (IS_ERR_VALUE(addr)) {
                /* check filp->f_op, filp->private_data are restored */
                if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
                        file_priv->filp->f_op = fops_get(dev->driver->fops);
                        file_priv->filp->private_data = file_priv;
                }
                mutex_unlock(&dev->struct_mutex);
                return (int)addr;
        }

        mutex_unlock(&dev->struct_mutex);

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
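
/* duplicate a vm_area_struct for userptr handling; released by exynos_gem_put_vma(). */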
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *vma_copy;

        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
                return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
                vma->vm_ops->open(vma);

        if (vma->vm_file)
                get_file(vma->vm_file);

        memcpy(vma_copy, vma, sizeof(*vma));

        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;

        return vma_copy;
}
void exynos_gem_put_vma(struct vm_area_struct *vma)
{
        if (!vma)
                return;

        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);

        if (vma->vm_file)
                fput(vma->vm_file);

        kfree(vma);
}
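
/*
 * pin the user pages backing a userptr range: follow_pfn() for VM_PFNMAP
 * (I/O) mappings, get_user_pages() for ordinary anonymous or file memory.
 */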
int exynos_gem_get_pages_from_userptr(unsigned long start,
                                      unsigned int npages,
                                      struct page **pages,
                                      struct vm_area_struct *vma)
{
        int get_npages;

        /* the memory region mmaped with VM_PFNMAP. */
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
                        if (ret)
                                return ret;

                        pages[i] = pfn_to_page(pfn);
                }

                if (i != npages) {
                        DRM_ERROR("failed to get user_pages.\n");
                        return -EINVAL;
                }

                return 0;
        }

        get_npages = get_user_pages(current, current->mm, start,
                        npages, 1, 1, pages, NULL);
        get_npages = max(get_npages, 0);
        if (get_npages != npages) {
                DRM_ERROR("failed to get user_pages.\n");
                while (get_npages)
                        put_page(pages[--get_npages]);
                return -EFAULT;
        }

        return 0;
}
void exynos_gem_put_pages_to_userptr(struct page **pages,
                                     unsigned int npages,
                                     struct vm_area_struct *vma)
{
        if (!vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; i++) {
                        set_page_dirty_lock(pages[i]);

                        /*
                         * undo the reference we took when populating
                         * the table.
                         */
                        put_page(pages[i]);
                }
        }
}
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                return -EIO;
        }

        mutex_unlock(&drm_dev->struct_mutex);
        return 0;
}
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                   struct sg_table *sgt,
                                   enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
        return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        /*
         * allocate memory to be used for framebuffer.
         * - this callback would be called by user application
         *   with DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
                                               EXYNOS_BO_WC, args->size);
        /*
         * If physically contiguous memory allocation fails and if IOMMU is
         * supported then try to get buffer from non physically contiguous
         * memory area.
         */
        if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
                dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
                exynos_gem_obj = exynos_drm_gem_create(dev,
                                        EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
                                        args->size);
        }

        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        /*
         * get the offset of memory allocated for a drm framebuffer.
         * - this callback would be called by user application
         *   with DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
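
/* page-fault handler for user mappings set up by exynos_drm_gem_mmap(). */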
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map a buffer with user.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        ret = check_gem_flags(exynos_gem_obj->flags);
        if (ret) {
                drm_gem_vm_close(vma);
                drm_gem_free_mmap_offset(obj);
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        update_vm_cache_attr(exynos_gem_obj, vma);

        return ret;
}