/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>
struct vb2_dc_conf {
        struct device                   *dev;
};

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        dma_addr_t                      dma_addr;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;
        struct sg_table                 *sgt_base;

        /* USERPTR related */
        struct vm_area_struct           *vma;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};
/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
        void (*cb)(struct page *pg))
{
        struct scatterlist *s;
        unsigned int i;

        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                struct page *page = sg_page(s);
                unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
                        >> PAGE_SHIFT;
                unsigned int j;

                for (j = 0; j < n_pages; ++j, ++page)
                        cb(page);
        }
}
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}
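
/*
 * Worked example (illustrative only, not from the original source): if the
 * mapped scatterlist has three 0x1000-byte segments at bus addresses 0x1000,
 * 0x2000 and 0x4000, the loop above accepts the first two adjacent segments,
 * stops at the gap before 0x4000 and reports a contiguous size of 0x2000.
 */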
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/
static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}
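
/*
 * Usage sketch (illustrative, not part of this file): drivers read the bus
 * address stored behind this cookie through the inline helper
 * vb2_dma_contig_plane_dma_addr() from <media/videobuf2-dma-contig.h>,
 * typically in their buf_queue callback:
 *
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 *	my_hw_set_dma_addr(dev, addr);     (hypothetical register write)
 *
 * my_hw_set_dma_addr() is a made-up name standing in for whatever the driver
 * actually programs into its hardware.
 */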
static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return buf->vaddr;
}
static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}
static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/
static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        put_device(buf->dev);
        kfree(buf);
}
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
                                        GFP_KERNEL | gfp_flags);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
         * map whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/
struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
};
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->base_sgt scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}
static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}
static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;
        int ret;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dir == dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
                attach->dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
        if (ret <= 0) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dir = dir;

        mutex_unlock(lock);

        return sgt;
}
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        /* nothing to be done here */
}
static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}
static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}
static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}
static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}
static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
                buf->size);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/
static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
        struct vm_area_struct *vma, unsigned long *res)
{
        unsigned long pfn, start_pfn, prev_pfn;
        unsigned int i;
        int ret;

        if (!vma_is_io(vma))
                return -EFAULT;

        ret = follow_pfn(vma, start, &pfn);
        if (ret)
                return ret;

        start_pfn = pfn;
        start += PAGE_SIZE;

        for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
                prev_pfn = pfn;
                ret = follow_pfn(vma, start, &pfn);

                if (ret) {
                        pr_err("no page for address %lu\n", start);
                        return ret;
                }
                if (pfn != prev_pfn + 1)
                        return -EINVAL;
        }

        *res = start_pfn;
        return 0;
}
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
{
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);

                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
                        }
                        pages[i] = pfn_to_page(pfn);
                }
        } else {
                int n;

                n = get_user_pages(current, current->mm, start & PAGE_MASK,
                        n_pages, write, 1, pages, NULL);
                /* negative error means that no page was pinned */
                n = max(n, 0);
                if (n != n_pages) {
                        pr_err("got only %d of %d user pages\n", n, n_pages);
                        while (n)
                                put_page(pages[--n]);
                        return -EFAULT;
                }
        }

        return 0;
}
static void vb2_dc_put_dirty_page(struct page *page)
{
        set_page_dirty_lock(page);
        put_page(page);
}
static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (sgt) {
                dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
                if (!vma_is_io(buf->vma))
                        vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_put_vma(buf->vma);
        kfree(buf);
}
/*
 * For some kind of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address or at the last resort just assume that
 * dma address == physical address (like it has been assumed in earlier version
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
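
/*
 * Example of the last-resort fallback above (illustrative only): with 4 KiB
 * pages (PAGE_SHIFT == 12), pfn 0x12345 is assumed to correspond to the DMA
 * address 0x12345000, i.e. the physical address of the page.
 */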
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        unsigned long start;
        unsigned long end;
        unsigned long offset;
        struct page **pages;
        int n_pages;
        int ret = 0;
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        start = vaddr & PAGE_MASK;
        offset = vaddr & ~PAGE_MASK;
        end = PAGE_ALIGN(vaddr + size);
        n_pages = (end - start) >> PAGE_SHIFT;

        pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                pr_err("failed to allocate pages table\n");
                goto fail_buf;
        }

        /* current->mm->mmap_sem is taken by videobuf2 core */
        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                pr_err("no vma for address %lu\n", vaddr);
                ret = -EFAULT;
                goto fail_pages;
        }

        if (vma->vm_end < vaddr + size) {
                pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
                ret = -EFAULT;
                goto fail_pages;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                pr_err("failed to copy vma\n");
                ret = -ENOMEM;
                goto fail_pages;
        }

        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
                unsigned long pfn;
                if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
                        buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
                        buf->size = size;
                        kfree(pages);
                        return buf;
                }

                pr_err("failed to get user pages\n");
                goto fail_vma;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_get_user_pages;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /* pages are no longer needed */
        kfree(pages);
        pages = NULL;

        sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
                buf->dma_dir);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->size = size;
        buf->dma_sgt = sgt;

        return buf;

fail_map_sg:
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, put_page);
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_get_user_pages:
        if (pages && !vma_is_io(buf->vma))
                while (n_pages)
                        put_page(pages[--n_pages]);

fail_vma:
        vb2_put_vma(buf->vma);

fail_pages:
        kfree(pages); /* kfree is NULL-proof */

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}
/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR_OR_NULL(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;

        return 0;
}
static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}
static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}
/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/
const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
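
/*
 * Usage sketch (illustrative, not part of this file): a driver typically
 * creates one allocator context per device at probe time, selects these
 * memops when initializing its vb2 queue, and hands the context to vb2 from
 * its queue_setup callback. Names prefixed with "my_" are hypothetical.
 *
 *	my_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(my_dev->alloc_ctx))
 *		return PTR_ERR(my_dev->alloc_ctx);
 *	q->mem_ops = &vb2_dma_contig_memops;
 *
 *	in queue_setup():	alloc_ctxs[0] = my_dev->alloc_ctx;
 *	at remove time:		vb2_dma_contig_cleanup_ctx(my_dev->alloc_ctx);
 */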
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");