// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

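/*
 * Allocate the DMA backing store for a GEM object according to its
 * EXYNOS_BO_* flags and build the pages[] array that backs mmap faults.
 */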
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated; otherwise it is only as physically
	 * contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
	 * mapping; otherwise a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}

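/*
 * Release the DMA buffer and the pages[] array set up by
 * exynos_drm_alloc_buf().
 */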
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the returned handle is the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return 0;
}

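/*
 * Free a GEM object: imported buffers are handed back to their exporter,
 * locally allocated ones are released through the DMA API.
 */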
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

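/*
 * Allocate and initialize the common GEM bookkeeping, including the
 * offset used for mmap.
 */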
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

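/*
 * Create a GEM object and allocate its backing buffer. Without an IOMMU a
 * NONCONTIG request is downgraded to a contiguous allocation.
 */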
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

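/*
 * Illustrative userspace sequence (a sketch, not part of this driver):
 * fill struct drm_exynos_gem_create from <drm/exynos_drm.h> and issue
 * DRM_IOCTL_EXYNOS_GEM_CREATE on an open DRM fd:
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *
 * On success, req.handle names the newly created GEM object.
 */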
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;

	return to_exynos_gem(obj);
}

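/*
 * Map the buffer into a userspace VMA through the DMA API; the requested
 * mapping must not exceed the size of the underlying allocation.
 */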
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

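/*
 * Dumb buffers pad each pixel row to whole bytes: pitch is
 * width * ceil(bpp / 8), e.g. a 1920-pixel-wide, 32 bpp buffer gets a
 * pitch of 1920 * 4 = 7680 bytes, and size = pitch * height.
 */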
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback is invoked by userspace via the
	 *	DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

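/*
 * Fault handler for GEM mmaps: insert the single faulting page, resolved
 * via the pages[] array built at allocation time.
 */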
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return VM_FAULT_SIGBUS;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *),
					   GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of its buffer type.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

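/*
 * Buffers are allocated with DMA_ATTR_NO_KERNEL_MAPPING, so there is no
 * kernel virtual address to hand out: vmap returns NULL and vunmap has
 * nothing to undo.
 */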
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}