// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"
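
/*
 * CPU fault handler: the object's backing store is physically
 * contiguous, so translate the faulting address into an offset from
 * the object's physical base and insert that pfn into the VMA.
 */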
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct armada_gem_vm_ops = {
	.fault = armada_gem_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}
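
/*
 * Tear an object down in the reverse order it was built: release the
 * mmap offset, free the page or linear backing store, unmap and drop
 * any dma-buf import, then release the GEM core state.
 */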
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = drm_to_armada_dev(obj->dev);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
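
/*
 * Provide backing store for an object: small (<= 8KiB) objects come
 * from the page allocator, anything larger is carved out of the
 * driver's linear (framebuffer) memory pool and cleared through a
 * temporary write-combining mapping.
 */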
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = drm_to_armada_dev(dev);
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}
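
/* Lazily map a linear object for kernel (CPU) access. */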
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

static const struct drm_gem_object_funcs armada_gem_object_funcs = {
	.free = armada_gem_free_object,
	.export = armada_gem_prime_export,
	.vm_ops = &armada_gem_vm_ops,
};
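
/*
 * Allocate a GEM object without shmem backing; the backing store is
 * attached later, either via armada_gem_linear_back() or a dma-buf
 * import.
 */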
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}
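
/* Allocate a shmem-backed GEM object. */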
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
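/*
 * A minimal userspace sketch of the dumb-buffer path (generic DRM
 * uapi; error handling omitted):
 *
 *	struct drm_mode_create_dumb arg = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
 *	// arg.handle, arg.pitch and arg.size are now filled in
 */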
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
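/*
 * A minimal userspace sketch (assuming the request and structure
 * names from the uapi header <drm/armada_drm.h>):
 *
 *	struct drm_armada_gem_create arg = { .size = length };
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &arg);
 *	// on success, arg.handle names the new object
 */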
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
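/*
 * args->offset and args->size are handed straight to vm_mmap() on the
 * object's shmem file, so the returned address has normal cacheable
 * shmem mapping semantics.
 */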
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}
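
/*
 * Copy user data into a kernel-mapped object - this is how small
 * CPU-written buffers (e.g. cursor images) are updated. The user pages
 * are faulted in ahead of the copy, and any registered update callback
 * is invoked on success.
 */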
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Prime support */
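/*
 * Exporting covers all three backing types, each handled separately
 * below: shmem objects pin their pages and DMA-map them, page-backed
 * objects DMA-map their single contiguous page, and linear objects
 * hand out their device address directly (there is no struct page
 * behind them).
 */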
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sgtable_sg(sgt, sg, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto release;

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto release;
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}
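
/*
 * Inverse of armada_gem_prime_map_dma_buf(): linear objects were never
 * DMA-mapped, so only shmem and page-backed tables are unmapped, and
 * the page references taken by the map are dropped for shmem objects.
 */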
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sgtable_sg(sgt, sg, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	/* CPU mmap through the dma-buf is not supported */
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf = armada_gem_prime_map_dma_buf,
	.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = armada_gem_dmabuf_mmap,
};
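
/* Export an object through a dma-buf using the ops above. */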
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
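
/*
 * Importing one of our own dma-bufs is short-circuited to the
 * underlying GEM object; foreign buffers are merely attached here,
 * with the actual DMA mapping deferred to armada_gem_map_import().
 */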
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
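
/*
 * Map an imported dma-buf for DMA. The display hardware scans out of
 * a single contiguous region, so the attachment must map to exactly
 * one scatterlist entry covering the whole object.
 */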
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		/* We want a single contiguous entry */
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		/* The mapped region is smaller than the object */
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}

	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}