/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
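/*
 * CPU fault handler for mmap'd objects: translate the faulting address
 * into a PFN within the object's contiguous physical region and insert
 * it directly into the process page tables.
 */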
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
        unsigned long addr = (unsigned long)vmf->virtual_address;
        unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
        int ret;

        pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
        ret = vm_insert_pfn(vma, addr, pfn);

        switch (ret) {
        case 0:
        case -EBUSY:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}
const struct vm_operations_struct armada_gem_vm_ops = {
        .fault  = armada_gem_vm_fault,
        .open   = drm_gem_vm_open,
        .close  = drm_gem_vm_close,
};
static size_t roundup_gem_size(size_t size)
{
        return roundup(size, PAGE_SIZE);
}
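/*
 * Objects are backed in one of three ways: pages from the system
 * allocator (small objects), a node in the driver's linear memory
 * pool, or an imported dma-buf attachment.
 */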
/* dev->struct_mutex is held here */
void armada_gem_free_object(struct drm_gem_object *obj)
{
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);

        DRM_DEBUG_DRIVER("release obj %p\n", dobj);

        drm_gem_free_mmap_offset(&dobj->obj);

        if (dobj->page) {
                /* page backed memory */
                unsigned int order = get_order(dobj->obj.size);
                __free_pages(dobj->page, order);
        } else if (dobj->linear) {
                /* linear backed memory */
                drm_mm_remove_node(dobj->linear);
                kfree(dobj->linear);
                if (dobj->addr)
                        iounmap(dobj->addr);
        }

        if (dobj->obj.import_attach) {
                /* We only ever display imported data */
                if (dobj->sgt)
                        dma_buf_unmap_attachment(dobj->obj.import_attach,
                                                 dobj->sgt, DMA_TO_DEVICE);
                drm_prime_gem_destroy(&dobj->obj, NULL);
        }

        drm_gem_object_release(&dobj->obj);

        kfree(dobj);
}
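/*
 * Provide physically contiguous backing for an object: small objects
 * (e.g. cursors) come straight from the page allocator, anything larger
 * is carved out of the driver's linear memory pool.
 */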
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
        struct armada_private *priv = dev->dev_private;
        size_t size = obj->obj.size;

        if (obj->page || obj->linear)
                return 0;

        /*
         * If it is a small allocation (typically cursor, which will
         * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
         * Framebuffers will never be this small (our minimum size for
         * framebuffers is larger than this anyway.)  Such objects are
         * only accessed by the CPU so we don't need any special handling
         * here.
         */
        if (size <= 8192) {
                unsigned int order = get_order(size);
                struct page *p = alloc_pages(GFP_KERNEL, order);

                if (p) {
                        obj->addr = page_address(p);
                        obj->phys_addr = page_to_phys(p);
                        obj->page = p;

                        memset(obj->addr, 0, PAGE_ALIGN(size));
                }
        }

        /*
         * We could grab something from CMA if it's enabled, but that
         * involves building in a problem:
         *
         * CMA's interface uses dma_alloc_coherent(), which provides us
         * with a CPU virtual address and a device address.
         *
         * The CPU virtual address may be either an address in the kernel
         * direct mapped region (for example, as it would be on x86) or
         * it may be remapped into another part of kernel memory space
         * (e.g., as it would be on ARM.)  This means virt_to_phys() on the
         * returned virtual address is invalid depending on the architecture
         * implementation.
         *
         * The device address may also not be a physical address; it may
         * be that there is some kind of remapping between the device and
         * system RAM, which makes the use of the device address also
         * unsafe to re-use as a physical address.
         *
         * This makes DRM usage of dma_alloc_coherent() in a generic way
         * at best very questionable and unsafe.
         */

        /* Otherwise, grab it from our linear allocation */
        if (!obj->page) {
                struct drm_mm_node *node;
                unsigned align = min_t(unsigned, size, SZ_2M);
                void __iomem *ptr;
                int ret;

                node = kzalloc(sizeof(*node), GFP_KERNEL);
                if (!node)
                        return -ENOSPC;

                mutex_lock(&dev->struct_mutex);
                ret = drm_mm_insert_node(&priv->linear, node, size, align,
                                         DRM_MM_SEARCH_DEFAULT);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        kfree(node);
                        return ret;
                }

                obj->linear = node;

                /* Ensure that the memory we're returning is cleared. */
                ptr = ioremap_wc(obj->linear->start, size);
                if (!ptr) {
                        mutex_lock(&dev->struct_mutex);
                        drm_mm_remove_node(obj->linear);
                        mutex_unlock(&dev->struct_mutex);
                        kfree(obj->linear);
                        obj->linear = NULL;
                        return -ENOMEM;
                }

                memset_io(ptr, 0, size);
                iounmap(ptr);

                obj->phys_addr = obj->linear->start;
                obj->dev_addr = obj->linear->start;
        }

        DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
                         (unsigned long long)obj->phys_addr,
                         (unsigned long long)obj->dev_addr);

        return 0;
}
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
        /* only linear objects need to be ioremap'd */
        if (!dobj->addr && dobj->linear)
                dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
        return dobj->addr;
}
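/*
 * Allocate a GEM object with no shmem backing; the caller attaches the
 * backing store later (linear memory or an imported dma-buf).
 */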
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
        struct armada_gem_object *obj;

        size = roundup_gem_size(size);

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        drm_gem_private_object_init(dev, &obj->obj, size);
        obj->dev_addr = DMA_ERROR_CODE;

        DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

        return obj;
}
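/* Allocate a shmem-backed GEM object, usable with the mmap and pwrite ioctls. */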
struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
        size_t size)
{
        struct armada_gem_object *obj;
        struct address_space *mapping;

        size = roundup_gem_size(size);

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        if (drm_gem_object_init(dev, &obj->obj, size)) {
                kfree(obj);
                return NULL;
        }

        obj->dev_addr = DMA_ERROR_CODE;

        mapping = file_inode(obj->obj.filp)->i_mapping;
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

        DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

        return obj;
}
/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
        struct drm_mode_create_dumb *args)
{
        struct armada_gem_object *dobj;
        u32 handle;
        size_t size;
        int ret;

        args->pitch = armada_pitch(args->width, args->bpp);
        args->size = size = args->pitch * args->height;

        dobj = armada_gem_alloc_private_object(dev, size);
        if (dobj == NULL)
                return -ENOMEM;

        ret = armada_gem_linear_back(dev, dobj);
        if (ret)
                goto err;

        ret = drm_gem_handle_create(file, &dobj->obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;

        /* drop reference from allocate - handle holds it now */
        DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
        drm_gem_object_unreference_unlocked(&dobj->obj);
        return ret;
}
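/*
 * Report the fake mmap offset for a dumb buffer, creating it if needed.
 * Imported objects are refused since they cannot be mapped this way.
 */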
int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
        uint32_t handle, uint64_t *offset)
{
        struct armada_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        obj = armada_gem_object_lookup(dev, file, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object\n");
                ret = -EINVAL;
                goto err_unlock;
        }

        /* Don't allow imported objects to be mapped */
        if (obj->obj.import_attach) {
                ret = -EINVAL;
                goto err_unref;
        }

        ret = drm_gem_create_mmap_offset(&obj->obj);
        if (ret == 0) {
                *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
                DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
        }

 err_unref:
        drm_gem_object_unreference(&obj->obj);
 err_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
        uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_armada_gem_create *args = data;
        struct armada_gem_object *dobj;
        size_t size;
        u32 handle;
        int ret;

        if (args->size == 0)
                return -ENOMEM;

        size = args->size;

        dobj = armada_gem_alloc_object(dev, size);
        if (dobj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &dobj->obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;

        /* drop reference from allocate - handle holds it now */
        DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
        drm_gem_object_unreference_unlocked(&dobj->obj);
        return ret;
}
/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_armada_gem_mmap *args = data;
        struct armada_gem_object *dobj;
        unsigned long addr;

        dobj = armada_gem_object_lookup(dev, file, args->handle);
        if (dobj == NULL)
                return -ENOENT;

        if (!dobj->obj.filp) {
                drm_gem_object_unreference(&dobj->obj);
                return -EINVAL;
        }

        addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, args->offset);
        drm_gem_object_unreference(&dobj->obj);
        if (IS_ERR_VALUE(addr))
                return addr;

        args->addr = addr;

        return 0;
}
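/*
 * Copy data from userspace into a kernel-mapped object, then call the
 * object's update hook (if any) so the consumer sees the new contents.
 */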
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_armada_gem_pwrite *args = data;
        struct armada_gem_object *dobj;
        char __user *ptr;
        int ret;

        DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
                args->handle, args->offset, args->size, args->ptr);

        if (args->size == 0)
                return 0;

        ptr = (char __user *)(uintptr_t)args->ptr;

        if (!access_ok(VERIFY_READ, ptr, args->size))
                return -EFAULT;

        ret = fault_in_multipages_readable(ptr, args->size);
        if (ret)
                return ret;

        dobj = armada_gem_object_lookup(dev, file, args->handle);
        if (dobj == NULL)
                return -ENOENT;

        /* Must be a kernel-mapped object */
        if (!dobj->addr) {
                ret = -EINVAL;
                goto unref;
        }

        if (args->offset > dobj->obj.size ||
            args->size > dobj->obj.size - args->offset) {
                DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
                ret = -EINVAL;
                goto unref;
        }

        if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
                ret = -EFAULT;
        } else if (dobj->update) {
                dobj->update(dobj->update_data);
                ret = 0;
        }

 unref:
        drm_gem_object_unreference_unlocked(&dobj->obj);
        return ret;
}
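/*
 * dma-buf export: build a scatterlist appropriate to the object's
 * backing - shmem pages, a single contiguous page, or a linear region
 * that has no struct page at all.
 */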
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
        enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        struct scatterlist *sg;
        struct sg_table *sgt;
        int i, num;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        if (dobj->obj.filp) {
                struct address_space *mapping;
                int count;

                count = dobj->obj.size / PAGE_SIZE;
                if (sg_alloc_table(sgt, count, GFP_KERNEL))
                        goto free_sgt;

                mapping = file_inode(dobj->obj.filp)->i_mapping;

                for_each_sg(sgt->sgl, sg, count, i) {
                        struct page *page;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page)) {
                                num = i;
                                goto release;
                        }

                        sg_set_page(sg, page, PAGE_SIZE, 0);
                }

                if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
                        num = sgt->nents;
                        goto release;
                }
        } else if (dobj->page) {
                /* Single contiguous page */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free_sgt;

                sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

                if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
                        goto free_table;
        } else if (dobj->linear) {
                /* Single contiguous physical region - no struct page */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free_sgt;
                sg_dma_address(sgt->sgl) = dobj->dev_addr;
                sg_dma_len(sgt->sgl) = dobj->obj.size;
        } else {
                goto free_sgt;
        }
        return sgt;

 release:
        for_each_sg(sgt->sgl, sg, num, i)
                page_cache_release(sg_page(sg));
 free_table:
        sg_free_table(sgt);
 free_sgt:
        kfree(sgt);
        return NULL;
}
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        int i;

        if (!dobj->linear)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        if (dobj->obj.filp) {
                struct scatterlist *sg;
                for_each_sg(sgt->sgl, sg, sgt->nents, i)
                        page_cache_release(sg_page(sg));
        }

        sg_free_table(sgt);
        kfree(sgt);
}
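/* kmap/kunmap of exported buffers is not supported; provide no-op stubs. */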
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
        return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}
static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
        .map_dma_buf    = armada_gem_prime_map_dma_buf,
        .unmap_dma_buf  = armada_gem_prime_unmap_dma_buf,
        .release        = drm_gem_dmabuf_release,
        .kmap_atomic    = armada_gem_dmabuf_no_kmap,
        .kunmap_atomic  = armada_gem_dmabuf_no_kunmap,
        .kmap           = armada_gem_dmabuf_no_kmap,
        .kunmap         = armada_gem_dmabuf_no_kunmap,
        .mmap           = armada_gem_dmabuf_mmap,
};
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
        int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &armada_gem_prime_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = obj;

        return dma_buf_export(&exp_info);
}
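/*
 * dma-buf import: a buffer we exported ourselves just gains a GEM
 * reference; a foreign buffer is attached but deliberately not mapped yet.
 */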
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
        struct dma_buf_attachment *attach;
        struct armada_gem_object *dobj;

        if (buf->ops == &armada_gem_prime_dmabuf_ops) {
                struct drm_gem_object *obj = buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing our own dmabuf(s) increases the
                         * refcount on the gem object itself.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        dobj = armada_gem_alloc_private_object(dev, buf->size);
        if (!dobj) {
                dma_buf_detach(buf, attach);
                return ERR_PTR(-ENOMEM);
        }

        dobj->obj.import_attach = attach;
        get_dma_buf(buf);

        /*
         * Don't call dma_buf_map_attachment() here - it maps the
         * scatterlist immediately for DMA, and this is not always
         * an appropriate thing to do.
         */
        return &dobj->obj;
}
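/*
 * Map a previously imported attachment for DMA. The display hardware
 * scans out a single contiguous region, so scattered or undersized
 * buffers are rejected.
 */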
int armada_gem_map_import(struct armada_gem_object *dobj)
{
        int ret;

        dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
                                           DMA_TO_DEVICE);
        if (!dobj->sgt) {
                DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
                return -EINVAL;
        }
        if (IS_ERR(dobj->sgt)) {
                ret = PTR_ERR(dobj->sgt);
                dobj->sgt = NULL;
                DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
                return ret;
        }
        if (dobj->sgt->nents > 1) {
                DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
                return -EINVAL;
        }
        if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
                DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
                return -EINVAL;
        }
        dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
        return 0;
}