/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&exynos_gem->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous
	 * memory region is allocated; otherwise the memory is made as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);

	/*
	 * If EXYNOS_BO_WC is set, or EXYNOS_BO_CACHABLE is not, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &exynos_gem->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     &exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, &exynos_gem->dma_attrs);
err_free:
	drm_free_large(exynos_gem->pages);

	return ret;
}
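
/*
 * For reference, exynos_drm_alloc_buf() above maps the userspace buffer
 * flags onto DMA attributes as follows (summary derived from the code):
 *
 *	!EXYNOS_BO_NONCONTIG                 -> DMA_ATTR_FORCE_CONTIGUOUS
 *	EXYNOS_BO_WC or !EXYNOS_BO_CACHABLE  -> DMA_ATTR_WRITE_COMBINE
 *	EXYNOS_BO_CACHABLE (without WC)      -> DMA_ATTR_NON_CONSISTENT
 *	always                               -> DMA_ATTR_NO_KERNEL_MAPPING
 */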
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			&exynos_gem->dma_attrs);

	drm_free_large(exynos_gem->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release the memory region of an imported buffer here;
	 * the exporter will release it once the dmabuf's refcount
	 * becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem->size;
}
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem;
}
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
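
/*
 * Userspace reaches exynos_drm_gem_create_ioctl() via the
 * DRM_IOCTL_EXYNOS_GEM_CREATE ioctl. A minimal sketch, assuming an open
 * DRM fd, libdrm's drmIoctl() and the uapi header <drm/exynos_drm.h>
 * (error handling omitted):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = length,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *	(on success, req.handle names the new GEM object)
 */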
int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
					      &args->offset);
}
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = to_exynos_gem(obj);

	return &exynos_gem->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
				 unsigned int gem_handle,
				 struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * drop obj->refcount one more time because
	 * exynos_drm_gem_get_dma_addr() already increased it.
	 */
	drm_gem_object_unreference_unlocked(obj);
}
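
/*
 * Note: exynos_drm_gem_get_dma_addr() and exynos_drm_gem_put_dma_addr()
 * are meant to be used as a pair: the get leaves an extra reference on
 * the GEM object which only the matching put drops. A sketch of a typical
 * in-kernel caller (handle/filp are whatever the caller already holds):
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, filp);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	... program the device with *addr ...
 *	exynos_drm_gem_put_dma_addr(drm_dev, handle, filp);
 */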
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}
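
/*
 * Note: exynos_drm_gem_mmap_buffer() resets vm_pgoff because the fake
 * offset userspace passed to mmap() has already been consumed by the GEM
 * object lookup; dma_mmap_attrs() expects the vma offset to be relative
 * to the start of the buffer.
 */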
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}
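
/*
 * dma_map_sg() returns the number of DMA segments actually mapped (which
 * may be fewer than sgt->nents if the IOMMU coalesced entries) and 0 on
 * failure, hence the !nents check above.
 */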
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				   struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	dma_unmap_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is invoked for a user application's
	 *	DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
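
/*
 * Example (sketch): a userspace dumb-buffer allocation that lands in
 * exynos_drm_gem_dumb_create(), using the generic DRM uapi
 * (error handling omitted):
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1024,
 *		.height = 768,
 *		.bpp = 32,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	(the kernel fills in creq.pitch, creq.size and creq.handle)
 */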
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of memory allocated for a drm framebuffer.
	 * - this callback is invoked for a user application's
	 *	DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
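
/*
 * Example (sketch): mapping a dumb buffer from userspace through
 * exynos_drm_gem_dumb_map_offset(), assuming "handle" and "size" come
 * from a prior DRM_IOCTL_MODE_CREATE_DUMB call:
 *
 *	struct drm_mode_map_dumb mreq = { .handle = handle };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mreq.offset);
 */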
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem = to_exynos_gem(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable mapping as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}
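
/*
 * Note: the vm_page_prot chosen in exynos_drm_gem_mmap() only affects the
 * CPU mapping being created here; the DMA attributes for the same buffer
 * were fixed at allocation time in exynos_drm_alloc_buf().
 */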
/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * we consider it NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	drm_free_large(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}