// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put_unlocked(&obj->gem);
}
/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}
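/*
 * Pin a buffer for use by a host1x client. When the caller requests a
 * physical address and the buffer already has an explicit IOMMU mapping,
 * only the IOVA is returned; otherwise an SG table describing the backing
 * memory is built so that host1x can map it through the DMA API.
 */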
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 */
	if (phys && obj->mm) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}
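/*
 * Return a kernel virtual mapping of the buffer: the existing vaddr for
 * DMA API allocations, a dma-buf vmap() for imported buffers, or a fresh
 * write-combined vmap() of the page array otherwise.
 */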
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};
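/*
 * Reserve a region of I/O virtual address space from the Tegra DRM drm_mm
 * allocator and map the buffer's SG table into the IOMMU domain at that
 * address.
 */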
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}
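/*
 * Undo tegra_bo_iommu_map(): unmap the buffer from the IOMMU domain and
 * return its IOVA range to the allocator.
 */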
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}
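/*
 * Allocate and initialize the GEM object and host1x handle for a buffer,
 * without allocating any backing storage yet.
 */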
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
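/*
 * Release the backing storage of a buffer: either the shmem pages and
 * their SG table, or the DMA API allocation, depending on how the buffer
 * was allocated.
 */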
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}
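/*
 * Back the buffer with shmem pages and create a device-mapped SG table
 * for them; used when an IOMMU domain is available.
 */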
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
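/*
 * Allocate backing storage: individual pages mapped through the IOMMU if
 * the driver owns an IOMMU domain, or a contiguous write-combined DMA API
 * allocation otherwise.
 */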
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size, unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}
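/*
 * Wrap a foreign dma-buf in a tegra_bo: attach to it, map its SG table
 * and, if an IOMMU domain is present, install an IOVA mapping for it.
 */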
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}
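/*
 * Fault handler for page-backed buffers: insert the page that backs the
 * faulting address into the VMA. Buffers without a page array cannot be
 * faulted in and signal SIGBUS instead.
 */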
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
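/*
 * Set up a userspace mapping of the buffer. DMA API allocations are mapped
 * with dma_mmap_wc(), while page-backed buffers rely on the fault handler
 * and a write-combined, mixed-map VMA.
 */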
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}
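/*
 * dma-buf exporter callback: build an SG table for the buffer and map it
 * for the importing device.
 */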
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}
static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}
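/*
 * Import a dma-buf. Buffers exported by this driver on the same device are
 * simply referenced again; anything else goes through tegra_bo_import().
 */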
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}