// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"
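
/*
 * host1x_bo callbacks: these let the host1x bus reference, pin and map
 * GEM objects that are attached to host1x jobs.
 */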
static void tegra_bo_put(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_put_unlocked(&obj->gem);
}

/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
                                  unsigned int nents, gfp_t gfp_mask)
{
        struct scatterlist *dst;
        unsigned int i;
        int err;

        err = sg_alloc_table(sgt, nents, gfp_mask);
        if (err < 0)
                return err;

        dst = sgt->sgl;

        for (i = 0; i < nents; i++) {
                sg_set_page(dst, sg_page(sg), sg->length, 0);
                dst = sg_next(dst);
                sg = sg_next(sg);
        }

        return 0;
}
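
/*
 * Pin a buffer for use by a device. Returns NULL and the IOVA through
 * @phys if the buffer is already mapped through the IOMMU, or an SG
 * table that the caller is expected to map via the DMA API.
 */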
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
                                     dma_addr_t *phys)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);
        struct sg_table *sgt;
        int err;

        /*
         * If we've manually mapped the buffer object through the IOMMU, make
         * sure to return the IOVA address of our mapping.
         */
        if (phys && obj->mm) {
                *phys = obj->iova;
                return NULL;
        }

        /*
         * If we don't have a mapping for this buffer yet, return an SG table
         * so that host1x can do the mapping for us via the DMA API.
         */
        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        if (obj->pages) {
                /*
                 * If the buffer object was allocated from the explicit IOMMU
                 * API code paths, construct an SG table from the pages.
                 */
                err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
                                                0, obj->gem.size, GFP_KERNEL);
                if (err < 0)
                        goto free;
        } else if (obj->sgt) {
                /*
                 * If the buffer object already has an SG table but no pages
                 * were allocated for it, it means the buffer was imported and
                 * the SG table needs to be copied to avoid overwriting any
                 * other potential users of the original SG table.
                 */
                err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
                                             GFP_KERNEL);
                if (err < 0)
                        goto free;
        } else {
                /*
                 * If the buffer object had no pages allocated and if it was
                 * not imported, it had to be allocated with the DMA API, so
                 * the DMA API helper can be used.
                 */
                err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
                                      obj->gem.size);
                if (err < 0)
                        goto free;
        }

        return sgt;

free:
        kfree(sgt);
        return ERR_PTR(err);
}
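
/* Undo tegra_bo_pin(): release the SG table handed to the caller. */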
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return obj->vaddr;
        else if (obj->gem.import_attach)
                return dma_buf_vmap(obj->gem.import_attach->dmabuf);
        else
                return vmap(obj->pages, obj->num_pages, VM_MAP,
                            pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return;
        else if (obj->gem.import_attach)
                dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
        else
                vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_get(&obj->gem);

        return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
        .get = tegra_bo_get,
        .put = tegra_bo_put,
        .pin = tegra_bo_pin,
        .unpin = tegra_bo_unpin,
        .mmap = tegra_bo_mmap,
        .munmap = tegra_bo_munmap,
};
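
/*
 * Reserve a node in the IOVA allocator and map the buffer's SG table
 * through the IOMMU into the reserved range.
 */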
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        int prot = IOMMU_READ | IOMMU_WRITE;
        int err;

        if (bo->mm)
                return -EBUSY;

        bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
        if (!bo->mm)
                return -ENOMEM;

        mutex_lock(&tegra->mm_lock);

        err = drm_mm_insert_node_generic(&tegra->mm,
                                         bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
        if (err < 0) {
                dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
                        err);
                goto unlock;
        }

        bo->iova = bo->mm->start;

        bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
                                bo->sgt->nents, prot);
        if (!bo->size) {
                dev_err(tegra->drm->dev, "failed to map buffer\n");
                err = -ENOMEM;
                goto remove;
        }

        mutex_unlock(&tegra->mm_lock);

        return 0;

remove:
        drm_mm_remove_node(bo->mm);
unlock:
        mutex_unlock(&tegra->mm_lock);
        kfree(bo->mm);
        return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        if (!bo->mm)
                return 0;

        mutex_lock(&tegra->mm_lock);
        iommu_unmap(tegra->domain, bo->iova, bo->size);
        drm_mm_remove_node(bo->mm);
        mutex_unlock(&tegra->mm_lock);

        kfree(bo->mm);

        return 0;
}

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
                                              size_t size)
{
        struct tegra_bo *bo;
        int err;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        host1x_bo_init(&bo->base, &tegra_bo_ops);
        size = round_up(size, PAGE_SIZE);

        err = drm_gem_object_init(drm, &bo->gem, size);
        if (err < 0)
                goto free;

        err = drm_gem_create_mmap_offset(&bo->gem);
        if (err < 0)
                goto release;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
free:
        kfree(bo);
        return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
        if (bo->pages) {
                dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                             DMA_FROM_DEVICE);
                drm_gem_put_pages(&bo->gem, bo->pages, true, true);
                sg_free_table(bo->sgt);
                kfree(bo->sgt);
        } else if (bo->vaddr) {
                dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
        }
}
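
/*
 * Back the buffer with shmem pages, build an SG table from them and
 * map that table for DMA.
 */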
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
        int err;

        bo->pages = drm_gem_get_pages(&bo->gem);
        if (IS_ERR(bo->pages))
                return PTR_ERR(bo->pages);

        bo->num_pages = bo->gem.size >> PAGE_SHIFT;

        bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto put_pages;
        }

        err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                         DMA_FROM_DEVICE);
        if (err == 0) {
                err = -EFAULT;
                goto free_sgt;
        }

        return 0;

free_sgt:
        sg_free_table(bo->sgt);
        kfree(bo->sgt);
put_pages:
        drm_gem_put_pages(&bo->gem, bo->pages, false, false);
        return err;
}
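
/*
 * Allocate backing storage: discontiguous pages mapped through the
 * IOMMU if a domain is available, a contiguous write-combined DMA
 * allocation otherwise.
 */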
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        if (tegra->domain) {
                err = tegra_bo_get_pages(drm, bo);
                if (err < 0)
                        return err;

                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0) {
                        tegra_bo_free(drm, bo);
                        return err;
                }
        } else {
                size_t size = bo->gem.size;

                bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
                                         GFP_KERNEL | __GFP_NOWARN);
                if (!bo->vaddr) {
                        dev_err(drm->dev,
                                "failed to allocate buffer of size %zu\n",
                                size);
                        return -ENOMEM;
                }
        }

        return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
                                 unsigned long flags)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, size);
        if (IS_ERR(bo))
                return bo;

        err = tegra_bo_alloc(drm, bo);
        if (err < 0)
                goto release;

        if (flags & DRM_TEGRA_GEM_CREATE_TILED)
                bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

        if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}
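
/*
 * Create a buffer object along with a userspace handle for it. On
 * success the handle holds the only reference; the local reference
 * taken during creation is dropped before returning.
 */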
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                             struct drm_device *drm,
                                             size_t size,
                                             unsigned long flags,
                                             u32 *handle)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_create(drm, size, flags);
        if (IS_ERR(bo))
                return bo;

        err = drm_gem_handle_create(file, &bo->gem, handle);
        if (err) {
                tegra_bo_free_object(&bo->gem);
                return ERR_PTR(err);
        }

        drm_gem_object_put_unlocked(&bo->gem);

        return bo;
}
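
/*
 * Import a foreign dma-buf: attach to it, map it into an SG table and,
 * if an IOMMU domain exists, map that table through the IOMMU.
 */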
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                                        struct dma_buf *buf)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct dma_buf_attachment *attach;
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, buf->size);
        if (IS_ERR(bo))
                return bo;

        attach = dma_buf_attach(buf, drm->dev);
        if (IS_ERR(attach)) {
                err = PTR_ERR(attach);
                goto free;
        }

        get_dma_buf(buf);

        bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
        }

        if (tegra->domain) {
                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0)
                        goto detach;
        }

        bo->gem.import_attach = attach;

        return bo;

detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
                dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
free:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
        struct tegra_drm *tegra = gem->dev->dev_private;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (tegra->domain)
                tegra_bo_iommu_unmap(tegra, bo);

        if (gem->import_attach) {
                dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_free(gem->dev, bo);
        }

        drm_gem_object_release(gem);
        kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_bo *bo;

        args->pitch = round_up(min_pitch, tegra->pitch_align);
        args->size = args->pitch * args->height;

        bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}
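
/*
 * Fault handler for userspace mappings: only page-backed buffers fault
 * pages in lazily; contiguous buffers are mapped up front by
 * __tegra_gem_mmap() and should never fault.
 */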
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *gem = vma->vm_private_data;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct page *page;
        pgoff_t offset;

        if (!bo->pages)
                return VM_FAULT_SIGBUS;

        offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
        page = bo->pages[offset];

        return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
        .fault = tegra_bo_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (!bo->pages) {
                unsigned long vm_pgoff = vma->vm_pgoff;
                int err;

                /*
                 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
                 * and set the vm_pgoff (used as a fake buffer offset by DRM)
                 * to 0 as we want to map the whole buffer.
                 */
                vma->vm_flags &= ~VM_PFNMAP;
                vma->vm_pgoff = 0;

                err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
                                  gem->size);
                if (err < 0) {
                        drm_gem_vm_close(vma);
                        return err;
                }

                vma->vm_pgoff = vm_pgoff;
        } else {
                pgprot_t prot = vm_get_page_prot(vma->vm_flags);

                vma->vm_flags |= VM_MIXEDMAP;
                vma->vm_flags &= ~VM_PFNMAP;

                vma->vm_page_prot = pgprot_writecombine(prot);
        }

        return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem;
        int err;

        err = drm_gem_mmap(file, vma);
        if (err < 0)
                return err;

        gem = vma->vm_private_data;

        return __tegra_gem_mmap(gem, vma);
}
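
/*
 * dma-buf export path: build a fresh SG table for each attachment and
 * map it for the importing device.
 */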
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
                            enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        if (bo->pages) {
                if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
                                              0, gem->size, GFP_KERNEL) < 0)
                        goto free;
        } else {
                if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
                                    gem->size) < 0)
                        goto free;
        }

        if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
                goto free;

        return sgt;

free:
        sg_free_table(sgt);
        kfree(sgt);
        return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
                                          struct sg_table *sgt,
                                          enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (bo->pages)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        sg_free_table(sgt);
        kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
        drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
                                            enum dma_data_direction direction)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct drm_device *drm = gem->dev;

        if (bo->pages)
                dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                                    DMA_FROM_DEVICE);

        return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
                                          enum dma_data_direction direction)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct drm_device *drm = gem->dev;

        if (bo->pages)
                dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                                       DMA_TO_DEVICE);

        return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem = buf->priv;
        int err;

        err = drm_gem_mmap_obj(gem, gem->size, vma);
        if (err < 0)
                return err;

        return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
        .map_dma_buf = tegra_gem_prime_map_dma_buf,
        .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
        .release = tegra_gem_prime_release,
        .begin_cpu_access = tegra_gem_prime_begin_cpu_access,
        .end_cpu_access = tegra_gem_prime_end_cpu_access,
        .mmap = tegra_gem_prime_mmap,
        .vmap = tegra_gem_prime_vmap,
        .vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
                                       int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.exp_name = KBUILD_MODNAME;
        exp_info.owner = gem->dev->driver->fops->owner;
        exp_info.ops = &tegra_gem_prime_dmabuf_ops;
        exp_info.size = gem->size;
        exp_info.flags = flags;
        exp_info.priv = gem;

        return drm_gem_dmabuf_export(gem->dev, &exp_info);
}
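
/*
 * If the dma-buf was exported by this driver for the same device,
 * short-circuit the import and reuse the underlying GEM object.
 */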
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
                                              struct dma_buf *buf)
{
        struct tegra_bo *bo;

        if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
                struct drm_gem_object *gem = buf->priv;

                if (gem->dev == drm) {
                        drm_gem_object_get(gem);
                        return gem;
                }
        }

        bo = tegra_bo_import(drm, buf);
        if (IS_ERR(bo))
                return ERR_CAST(bo);

        return &bo->gem;
}