// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
static const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};
static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = drm_to_armada_dev(obj->dev);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);

		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = drm_to_armada_dev(dev);
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);

	return dobj->addr;
}
static const struct drm_gem_object_funcs armada_gem_object_funcs = {
	.free = armada_gem_free_object,
	.export = armada_gem_prime_export,
	.vm_ops = &armada_gem_vm_ops,
};
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}
/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}
/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}
/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put(&dobj->obj);
	return ret;
}
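/*
 * dma-buf export: build a scatterlist for whichever backing store the
 * object has - shmem pages, a single contiguous page allocation, or a
 * linear region with no struct pages at all.
 */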
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sgtable_sg(sgt, sg, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto release;

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto release;
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sgtable_sg(sgt, sg, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.mmap		= armada_gem_dmabuf_mmap,
};
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
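/*
 * Perform the attachment mapping that was deliberately deferred at
 * import time.  Only a single contiguous DMA segment covering the
 * whole object is accepted.
 */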
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	return 0;
}