/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
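/*
 * Buffer allocation and release helpers.
 *
 * Buffers are allocated through the DMA mapping API. exynos_gem->cookie
 * holds the opaque value returned by dma_alloc_attrs(); because
 * DMA_ATTR_NO_KERNEL_MAPPING is requested, no kernel virtual mapping is
 * created for the buffer. The backing page array is recovered from the
 * allocation through a temporary sg_table so that the fault handler and
 * PRIME export can operate on individual pages.
 */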
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
        struct drm_device *dev = exynos_gem->base.dev;
        enum dma_attr attr;
        unsigned int nr_pages;
        struct sg_table sgt;
        int ret = -ENOMEM;

        if (exynos_gem->dma_addr) {
                DRM_DEBUG_KMS("already allocated.\n");
                return 0;
        }

        init_dma_attrs(&exynos_gem->dma_attrs);

        /*
         * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
         * region will be allocated; otherwise the memory is made as
         * physically contiguous as possible.
         */
        if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
                dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);

        /*
         * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
         * mapping; otherwise use a cachable mapping.
         */
        if (exynos_gem->flags & EXYNOS_BO_WC ||
                        !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
                attr = DMA_ATTR_WRITE_COMBINE;
        else
                attr = DMA_ATTR_NON_CONSISTENT;

        dma_set_attr(attr, &exynos_gem->dma_attrs);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);

        nr_pages = exynos_gem->size >> PAGE_SHIFT;

        exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
        if (!exynos_gem->pages) {
                DRM_ERROR("failed to allocate pages.\n");
                return -ENOMEM;
        }

        exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
                                             &exynos_gem->dma_addr, GFP_KERNEL,
                                             &exynos_gem->dma_attrs);
        if (!exynos_gem->cookie) {
                DRM_ERROR("failed to allocate buffer.\n");
                goto err_free;
        }

        ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie,
                                    exynos_gem->dma_addr, exynos_gem->size,
                                    &exynos_gem->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to get sgtable.\n");
                goto err_dma_free;
        }

        if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
                                             nr_pages)) {
                DRM_ERROR("invalid sgtable.\n");
                ret = -EINVAL;
                goto err_sgt_free;
        }

        sg_free_table(&sgt);

        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

        return 0;

err_sgt_free:
        sg_free_table(&sgt);
err_dma_free:
        dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
                       exynos_gem->dma_addr, &exynos_gem->dma_attrs);
err_free:
        drm_free_large(exynos_gem->pages);

        return ret;
}
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
        struct drm_device *dev = exynos_gem->base.dev;

        if (!exynos_gem->dma_addr) {
                DRM_DEBUG_KMS("dma_addr is invalid.\n");
                return;
        }

        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

        dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
                        (dma_addr_t)exynos_gem->dma_addr,
                        &exynos_gem->dma_attrs);

        drm_free_large(exynos_gem->pages);
}
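/*
 * Note: drm_gem_handle_create() takes a reference of its own on success,
 * so the reference taken when the object was allocated is dropped below;
 * from then on the handle keeps the object alive until userspace closes
 * it.
 */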
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * Allocate an id in the idr table where the obj is registered;
         * the returned handle holds that id for userspace to refer to.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
        struct drm_gem_object *obj = &exynos_gem->base;

        DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

        /*
         * Do not release the memory region of an imported buffer here;
         * the region will be released by the exporter once the dmabuf's
         * refcount becomes 0.
         */
        if (obj->import_attach)
                drm_prime_gem_destroy(obj, exynos_gem->sgt);
        else
                exynos_drm_free_buf(exynos_gem);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem);
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                      unsigned int gem_handle,
                                      struct drm_file *file_priv)
{
        struct exynos_drm_gem *exynos_gem;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem = to_exynos_gem(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem->size;
}
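/*
 * Common object setup: allocate the wrapper, initialize the base GEM
 * object and reserve a fake mmap offset. On failure everything set up
 * so far is rolled back and an ERR_PTR is returned.
 */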
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
                                                  unsigned long size)
{
        struct exynos_drm_gem *exynos_gem;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
        if (!exynos_gem)
                return ERR_PTR(-ENOMEM);

        exynos_gem->size = size;
        obj = &exynos_gem->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem);
                return ERR_PTR(ret);
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret < 0) {
                drm_gem_object_release(obj);
                kfree(exynos_gem);
                return ERR_PTR(ret);
        }

        DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

        return exynos_gem;
}
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
                                             unsigned int flags,
                                             unsigned long size)
{
        struct exynos_drm_gem *exynos_gem;
        int ret;

        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup(size, PAGE_SIZE);

        exynos_gem = exynos_drm_gem_init(dev, size);
        if (IS_ERR(exynos_gem))
                return exynos_gem;

        /* set memory type and cache attribute from user side. */
        exynos_gem->flags = flags;

        ret = exynos_drm_alloc_buf(exynos_gem);
        if (ret < 0) {
                drm_gem_object_release(&exynos_gem->base);
                kfree(exynos_gem);
                return ERR_PTR(ret);
        }

        return exynos_gem;
}
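/*
 * Userspace entry point for buffer allocation. A hypothetical call from
 * an application (field names from <drm/exynos_drm.h>; "fd" is an open
 * exynos DRM device node) might look like:
 *
 *        struct drm_exynos_gem_create req = {
 *                .size  = length,
 *                .flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *        };
 *        ret = ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *
 * On success, req.handle names the new GEM object.
 */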
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem *exynos_gem;
        int ret;

        exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem))
                return PTR_ERR(exynos_gem);

        ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
                                           &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem);
                return ret;
        }

        return 0;
}
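/*
 * exynos_drm_gem_get_dma_addr()/exynos_drm_gem_put_dma_addr() give other
 * kernel code temporary access to a buffer's DMA address by GEM handle.
 */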
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem *exynos_gem;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem = to_exynos_gem(obj);

        return &exynos_gem->dma_addr;
}
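/*
 * The lookup reference taken above is deliberately kept: it pins the
 * object while the caller uses the DMA address, and is dropped again in
 * exynos_drm_gem_put_dma_addr() below.
 */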
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                 unsigned int gem_handle,
                                 struct drm_file *filp)
{
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        drm_gem_object_unreference_unlocked(obj);

        /*
         * Drop obj->refcount one more time because we already
         * increased it in exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}
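/*
 * Map the whole buffer into the vma with the DMA attributes chosen at
 * allocation time. VM_PFNMAP is cleared and vm_pgoff reset to 0 because
 * the vm_pgoff set up by drm_gem_mmap() is the fake GEM mmap offset,
 * not an offset into the buffer that dma_mmap_attrs() could use.
 */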
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
                                      struct vm_area_struct *vma)
{
        struct drm_device *drm_dev = exynos_gem->base.dev;
        unsigned long vm_size;
        int ret;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        vm_size = vma->vm_end - vma->vm_start;

        /* check if user-requested size is valid. */
        if (vm_size > exynos_gem->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages,
                             exynos_gem->dma_addr, exynos_gem->size,
                             &exynos_gem->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct exynos_drm_gem *exynos_gem;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem = to_exynos_gem(obj);

        args->flags = exynos_gem->flags;
        args->size = exynos_gem->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
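/*
 * Map or unmap an externally provided sg_table for device DMA. These
 * helpers are used by other parts of the driver (e.g. the G2D path)
 * rather than by the GEM code itself.
 */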
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                return -EIO;
        }

        mutex_unlock(&drm_dev->struct_mutex);
        return 0;
}
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                   struct sg_table *sgt,
                                   enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        exynos_drm_gem_destroy(to_exynos_gem(obj));
}
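/*
 * Worked example of the pitch/size computation in
 * exynos_drm_gem_dumb_create() below: a 1920x1080 buffer at 32 bpp gets
 * pitch = 1920 * ((32 + 7) / 8) = 7680 bytes and size = 7680 * 1080 =
 * 8294400 bytes, which exynos_drm_gem_create() then rounds up to a
 * multiple of PAGE_SIZE.
 */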
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem *exynos_gem;
        unsigned int flags;
        int ret;

        /*
         * allocate memory to be used for framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        if (is_drm_iommu_supported(dev))
                flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
        else
                flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

        exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
        if (IS_ERR(exynos_gem)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem);
        }

        ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
                                           &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem);
                return ret;
        }

        return 0;
}
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        /*
         * get offset of memory allocated for drm framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

        drm_gem_object_unreference(obj);

unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
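/*
 * Page fault handler for GEM mappings: resolve the faulting address to
 * a page in the backing page array and insert it into the vma.
 */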
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
        unsigned long pfn;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;

        if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                ret = -EINVAL;
                goto out;
        }

        pfn = page_to_pfn(exynos_gem->pages[page_offset]);
        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
        switch (ret) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem *exynos_gem;
        struct drm_gem_object *obj;
        int ret;

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem = to_exynos_gem(obj);

        DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

        /* non-cachable as default. */
        if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (exynos_gem->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));

        ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
        if (ret)
                goto err_close_vm;

        return ret;

err_close_vm:
        drm_gem_vm_close(vma);

        return ret;
}
/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
        int npages;

        npages = exynos_gem->size >> PAGE_SHIFT;

        return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}
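/*
 * Import side of PRIME: wrap an sg_table handed over by another driver
 * in an exynos GEM object. The exporter keeps ownership of the backing
 * memory; see exynos_drm_gem_destroy() for the matching teardown rule.
 */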
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
                                     struct dma_buf_attachment *attach,
                                     struct sg_table *sgt)
{
        struct exynos_drm_gem *exynos_gem;
        int npages;
        int ret;

        exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
        if (IS_ERR(exynos_gem)) {
                ret = PTR_ERR(exynos_gem);
                return ERR_PTR(ret);
        }

        exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

        npages = exynos_gem->size >> PAGE_SHIFT;
        exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (!exynos_gem->pages) {
                ret = -ENOMEM;
                goto err;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
                                               npages);
        if (ret < 0)
                goto err_free_large;

        exynos_gem->sgt = sgt;

        if (sgt->nents == 1) {
                /* always physically contiguous memory if sgt->nents is 1. */
                exynos_gem->flags |= EXYNOS_BO_CONTIG;
        } else {
                /*
                 * This case could be CONTIG or NONCONTIG, but for now
                 * NONCONTIG is assumed.
                 * TODO: find a way for the exporter to notify the
                 * importer of the type of its own buffer.
                 */
                exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
        }

        return &exynos_gem->base;

err_free_large:
        drm_free_large(exynos_gem->pages);
err:
        drm_gem_object_release(&exynos_gem->base);
        kfree(exynos_gem);
        return ERR_PTR(ret);
}
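/*
 * No kernel-side vmap is provided: buffers are allocated with
 * DMA_ATTR_NO_KERNEL_MAPPING, so there is no kernel virtual address to
 * hand out.
 */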
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
        return NULL;
}
void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do */
}