/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>
struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};
/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}
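
/*
 * Descriptive note (added): the helper below walks a DMA-mapped sg table
 * and returns the length of the initial physically contiguous run starting
 * at its first segment. Callers use it to verify that a pinned or imported
 * buffer really is DMA-contiguous.
 */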
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/
static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}
static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}
static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map the whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}
static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}
static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}
static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}
static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}
static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}
static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/
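
/*
 * Descriptive note (added): USERPTR buffers come in two flavours. Regular
 * mappings are pinned with get_user_pages() and described by an sg table;
 * VM_IO/VM_PFNMAP mappings (e.g. framebuffers or reserved memory) have no
 * struct page, so they are handled by following PFNs directly.
 */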
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
	struct vm_area_struct *vma, unsigned long *res)
{
	unsigned long pfn, start_pfn, prev_pfn;
	unsigned int i;
	int ret;

	if (!vma_is_io(vma))
		return -EFAULT;

	ret = follow_pfn(vma, start, &pfn);
	if (ret)
		return ret;

	start_pfn = pfn;
	start += PAGE_SIZE;

	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
		prev_pfn = pfn;
		ret = follow_pfn(vma, start, &pfn);

		if (ret) {
			pr_err("no page for address %lu\n", start);
			return ret;
		}
		if (pfn != prev_pfn + 1)
			return -EINVAL;
	}

	*res = start_pfn;
	return 0;
}
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}
static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (sgt) {
		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
		if (!vma_is_io(buf->vma))
			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_put_vma(buf->vma);
	kfree(buf);
}
/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address, or at the last resort just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		unsigned long pfn;
		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
			buf->size = size;
			kfree(pages);
			return buf;
		}

		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}
/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}
static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}
static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/
const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");