/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
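
/*
 * Usage sketch (illustrative, assumes the vb2_dma_contig_plane_dma_addr()
 * helper from media/videobuf2-dma-contig.h): drivers do not normally call
 * the cookie op directly; instead they fetch a plane's bus address with:
 *
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 *
 * which simply dereferences the dma_addr_t pointer returned above.
 */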
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
			     buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* map the attachment's scatterlist with the new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.map_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a DMA address, or as a last resort assume that
 * DMA address == physical address (as was assumed in earlier versions of
 * videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}
/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* check that the dmabuf is big enough to store a contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}
static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
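
/*
 * Usage sketch (illustrative; my_dev, my_queue_ops and struct my_buffer are
 * hypothetical driver names, while the vb2_queue fields and vb2_queue_init()
 * come from videobuf2-core): a capture driver selects this allocator by
 * pointing its queue at vb2_dma_contig_memops before initializing it:
 *
 *	struct vb2_queue *q = &my_dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->dev = my_dev->dev;			(device that performs DMA)
 *	q->ops = &my_queue_ops;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	ret = vb2_queue_init(q);
 *
 * All alloc/mmap/userptr/dmabuf callbacks above are then invoked by the
 * vb2 core on behalf of the driver.
 */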
/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 *
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 *
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 *
 * This function should be called by drivers known to operate on platforms
 * with an IOMMU which provide access to shared buffers (either USERPTR or
 * DMABUF). It should be called before initializing the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
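
/*
 * Usage sketch (illustrative; the probe/remove callbacks are hypothetical,
 * DMA_BIT_MASK comes from linux/dma-mapping.h): a driver on an IOMMU-backed
 * platform would typically raise the limit in probe(), before creating its
 * vb2 queue, and drop it again in remove():
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 *	...
 *	vb2_dma_contig_clear_max_seg_size(&pdev->dev);	(in remove())
 */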
/**
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");