/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>
struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};
/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
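/*
 * Worked example (illustrative addition, not from the original source):
 * for a mapped table whose segments land at DMA addresses 0x1000 (len
 * 0x1000) and 0x2000 (len 0x1000), followed by a third at 0x5000, the
 * helper below walks the list, stops at the first gap and reports a
 * contiguous size of 0x2000 bytes.
 */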
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}
static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}
static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}
static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer.
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}
static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}
static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}
static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}
static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}
static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}
static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address, or at the last resort just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
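/*
 * Illustrative arithmetic (added note, assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): the last-resort fallback above turns pfn 0x12345
 * into DMA address 0x12345000, i.e. it simply assumes that the DMA
 * address equals the physical address.
 */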
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}
/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}
static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}
static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}
static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
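/*
 * Minimal usage sketch (hypothetical driver code, not part of this
 * file): a vb2-based driver selects this allocator by pointing its
 * vb2_queue at the exported ops before calling vb2_queue_init():
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->dev = &pdev->dev;		(device that performs the DMA)
 *	ret = vb2_queue_init(q);
 */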
/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called by drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
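/*
 * Probe-time sketch (hypothetical driver code, under the assumptions in
 * the kernel-doc above): raise the max segment size before the queue is
 * initialized so that an IOMMU can merge a USERPTR/DMABUF scatterlist
 * into one contiguous DMA chunk:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 *	ret = vb2_queue_init(q);
 */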
/**
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see the vb2_dma_contig_set_max_seg_size() function). It should be called
 * from device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
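/*
 * Matching remove-time sketch (hypothetical): release the dma_parms
 * allocated by vb2_dma_contig_set_max_seg_size() when the driver is
 * unbound:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *		return 0;
 *	}
 */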
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");