/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

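/*
 * Map a buffer object into the Tegra DRM IOMMU domain: carve a region
 * out of the I/O virtual address space managed by tegra->mm, then map
 * the buffer's scatter-gather list into it, so that host1x clients see
 * a single linear address in bo->paddr.
 */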
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
				      bo->paddr);
	}
}

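/*
 * Back a buffer object with discontiguous pages from shmem and build a
 * scatter-gather table for them. This path is only taken when an IOMMU
 * is available to make the pages appear contiguous to the hardware.
 */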
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}

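/*
 * Allocate backing storage for a buffer object. With an IOMMU domain,
 * discontiguous shmem pages are used and remapped to a contiguous I/O
 * virtual range; without one, the buffer must be physically contiguous,
 * so it is allocated from the writecombined DMA pool (typically CMA).
 */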
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
						   GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size, unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}

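/*
 * Import a dma-buf exported by another driver: attach to it, map it into
 * the device's address space and wrap it in a tegra_bo. Without an IOMMU
 * the imported buffer must already be contiguous (a single SG entry).
 */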
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

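/*
 * Create a dumb buffer for scanout. The minimum pitch is width * bpp
 * rounded up to whole bytes, which is then aligned to the hardware's
 * pitch requirement (tegra->pitch_align) before sizing the allocation.
 */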
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}

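/*
 * Fault handler for mmap()ed buffer objects backed by individual pages:
 * look up the page that covers the faulting address and insert it into
 * the user's mapping. Contiguous buffers are mapped up front in
 * tegra_drm_mmap() and take the SIGBUS path here.
 */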
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

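/*
 * Set up a userspace mapping for a buffer object. Contiguous buffers
 * are mapped immediately via dma_mmap_writecombine(); page-backed
 * buffers are instead populated lazily, one page at a time, by
 * tegra_bo_fault() through the VM_MIXEDMAP mapping set up here.
 */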
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

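/*
 * dma-buf exporter side: build an SG table describing the buffer for an
 * importing device. Page-backed buffers get one entry per page and are
 * mapped with dma_map_sg(); contiguous buffers are described by a single
 * entry covering the whole allocation.
 */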
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page, void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return dma_buf_export(&exp_info);
}

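/*
 * dma-buf importer side: if the buffer was exported by this very device,
 * skip the import machinery and simply take a reference on the existing
 * GEM object; otherwise go through a full tegra_bo_import().
 */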
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}