/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device *dev;
        struct dma_attrs attrs;
};

struct vb2_dc_buf {
        struct device *dev;
        void *vaddr;
        unsigned long size;
        void *cookie;
        dma_addr_t dma_addr;
        struct dma_attrs attrs;
        enum dma_data_direction dma_dir;
        struct sg_table *dma_sgt;
        struct frame_vector *vec;

        /* MMAP related */
        struct vb2_vmarea_handler handler;
        atomic_t refcount;
        struct sg_table *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

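/*
 * Return the length of the initial DMA-contiguous run of a mapped
 * scatterlist: walk the entries and stop at the first gap between the
 * end of one segment and the start of the next.
 */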
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

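/*
 * The "cookie" of a dma-contig buffer is a pointer to its DMA address;
 * drivers typically retrieve it through the
 * vb2_dma_contig_plane_dma_addr() helper from videobuf2-dma-contig.h.
 */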
static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

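/*
 * For MMAP buffers vaddr is set at allocation time (unless
 * DMA_ATTR_NO_KERNEL_MAPPING was requested); for imported DMABUFs the
 * kernel mapping is created lazily here via dma_buf_vmap().
 */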
static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

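/*
 * prepare()/finish() hand the buffer over to the device and back to the
 * CPU, syncing CPU caches over the whole original scatterlist
 * (orig_nents). Nothing is done for DMABUF imports, where the exporter
 * owns cache maintenance.
 */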
static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
                               buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

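/*
 * Drop one reference to an MMAP buffer; the last put releases the
 * exported sg table (if any), frees the DMA memory and drops the
 * device reference taken in vb2_dc_alloc().
 */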
static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
                       &buf->attrs);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
                          enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->attrs = conf->attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
                                      GFP_KERNEL | gfp_flags, &buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_attrs of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
                buf->vaddr = buf->cookie;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want
         * to map the whole buffer.
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
                buf->dma_addr, buf->size, &buf->attrs);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

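/*
 * Each attachment carries its own copy of the exporter's sg table,
 * because one scatterlist cannot be mapped into several devices at the
 * same time. The resulting mapping is cached in the attachment and
 * reused as long as the requested direction does not change.
 */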
struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

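/*
 * Build the sg table describing the coherent allocation; it serves as
 * the template copied into every attachment in vb2_dc_dmabuf_ops_attach().
 */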
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
                buf->size, &buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

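/*
 * USERPTR buffers are pinned with a frame vector, wrapped in an sg
 * table and DMA-mapped, then checked for contiguity with
 * vb2_dc_get_contiguous_size(). Memory without struct pages (e.g. some
 * carveouts) falls back to a direct pfn-to-dma translation.
 */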
static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                DEFINE_DMA_ATTRS(attrs);

                dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                   buf->dma_dir, &attrs);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                for (i = 0; i < frame_vector_count(buf->vec); i++)
                        set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page
 * available, so all that can be done to support such 'pages' is to try
 * to convert the pfn to a dma address, or as a last resort assume that
 * dma address == physical address (as earlier versions of
 * videobuf2-dma-contig did).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned long offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = dma_dir;

        offset = vaddr & ~PAGE_MASK;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check the memory is physically
                 * contiguous and use direct mapping
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, &attrs);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                           buf->dma_dir, &attrs);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

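/*
 * Importer side: map_dmabuf() pins the attachment's scatterlist and
 * verifies that the exporter really handed over one contiguous chunk
 * of at least the requested size.
 */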
static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc = vb2_dc_alloc,
        .put = vb2_dc_put,
        .get_dmabuf = vb2_dc_get_dmabuf,
        .cookie = vb2_dc_cookie,
        .vaddr = vb2_dc_vaddr,
        .mmap = vb2_dc_mmap,
        .get_userptr = vb2_dc_get_userptr,
        .put_userptr = vb2_dc_put_userptr,
        .prepare = vb2_dc_prepare,
        .finish = vb2_dc_finish,
        .map_dmabuf = vb2_dc_map_dmabuf,
        .unmap_dmabuf = vb2_dc_unmap_dmabuf,
        .attach_dmabuf = vb2_dc_attach_dmabuf,
        .detach_dmabuf = vb2_dc_detach_dmabuf,
        .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx_attrs(struct device *dev,
                                    struct dma_attrs *attrs)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;
        if (attrs)
                conf->attrs = *attrs;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx_attrs);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        if (!IS_ERR_OR_NULL(alloc_ctx))
                kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

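/*
 * Illustrative usage sketch (not code from this file): a typical driver
 * creates one allocation context per device and plugs these memops into
 * its vb2 queue. Identifiers such as "my_dev" and "q" are hypothetical;
 * vb2_dma_contig_init_ctx() is the attrs-less wrapper declared in
 * media/videobuf2-dma-contig.h.
 *
 *        void *alloc_ctx = vb2_dma_contig_init_ctx(&my_dev->dev);
 *
 *        if (IS_ERR(alloc_ctx))
 *                return PTR_ERR(alloc_ctx);
 *        q->mem_ops = &vb2_dma_contig_memops;
 *
 * The context is then handed out from the driver's queue_setup()
 * callback (alloc_ctxs[0] = alloc_ctx) and released on teardown with
 * vb2_dma_contig_cleanup_ctx(alloc_ctx).
 */
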
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");