/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region is allocated; otherwise memory is made as physically
	 * contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
	 * mapping; otherwise a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}
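
/*
 * Note that the pages[] array built above is not used for the CPU mapping
 * itself (dma_mmap_attrs() handles that); it backs the per-page fault
 * handler below and drm_prime_pages_to_sg() when the buffer is exported
 * via PRIME.
 */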
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle carries that id so userspace can refer to the object.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer here;
	 * the exporter releases it once the dmabuf's refcount drops to zero.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);

	return exynos_gem;
}
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
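
/*
 * A minimal userspace sketch of this ioctl (illustrative only; assumes
 * libdrm's drmIoctl() and the uapi exynos_drm.h header):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = length,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		buffer_handle = req.handle;
 */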
int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;

	return to_exynos_gem(obj);
}
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put_unlocked(obj);

	return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used as a framebuffer.
	 * This callback is invoked by userspace via the
	 * DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
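
/*
 * A minimal userspace sketch of the dumb-buffer path (illustrative only;
 * assumes libdrm's drmIoctl()); the kernel fills pitch, size and handle:
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 */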
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return VM_FAULT_SIGBUS;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}
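
/*
 * Each fault above resolves exactly one page from the pages[] array filled
 * at allocation time; vmf_insert_mixed() installs the PTE and already
 * returns a VM_FAULT_* code, so no further error translation is needed.
 */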
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}
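
/*
 * For imported buffers, dma_buf_mmap() above delegates the mapping to the
 * exporting driver's own mmap implementation, so the page attributes set
 * up in exynos_drm_gem_mmap_obj() apply only to natively allocated GEMs.
 */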
/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *),
					   GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_pages;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically continuous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_pages:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}
void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}