/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>
struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};
/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}
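/*
 * Return the size of the run of contiguous DMA addresses that starts at
 * the first entry of a mapped scatterlist. Callers compare this against
 * the requested buffer size to check that the mapping is usable as one
 * chunk.
 */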
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}
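/*
 * Allocate a physically contiguous buffer with dma_alloc_coherent() and
 * take a reference on the device so it cannot go away while the buffer
 * lives; vb2_dc_put() above drops both.
 */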
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}
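/*
 * Map the whole buffer into userspace through dma_mmap_coherent() so the
 * mapping gets the attributes the DMA API requires for coherent memory.
 */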
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};
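/*
 * Each attachment carries its own copy of the exporter's scatterlist,
 * because one sg_table cannot be DMA-mapped for several importers at
 * once; dir records the direction it is currently mapped for (DMA_NONE
 * when unmapped).
 */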
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}
static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
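/*
 * Map the attachment's private scatterlist for the importing device. The
 * mapping is cached: a repeat call with the same direction returns the
 * existing table, while a direction change unmaps and remaps it.
 */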
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};
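/*
 * Describe the coherent allocation as an sg_table via dma_get_sgtable(),
 * so the MMAP buffer can be handed out through the exporter ops above.
 * The table is cached in buf->sgt_base and freed in vb2_dc_put().
 */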
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
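/*
 * For VM_IO/VM_PFNMAP mappings there are no struct pages to pin; instead
 * walk the pfns with follow_pfn() and succeed only if they form a single
 * physically contiguous run, whose first pfn is returned in *res.
 */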
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
	struct vm_area_struct *vma, unsigned long *res)
{
	unsigned long pfn, start_pfn, prev_pfn;
	unsigned int i;
	int ret;

	if (!vma_is_io(vma))
		return -EFAULT;

	ret = follow_pfn(vma, start, &pfn);
	if (ret)
		return ret;

	start_pfn = pfn;
	start += PAGE_SIZE;

	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
		prev_pfn = pfn;
		ret = follow_pfn(vma, start, &pfn);

		if (ret) {
			pr_err("no page for address %lu\n", start);
			return ret;
		}
		if (pfn != prev_pfn + 1)
			return -EINVAL;
	}

	*res = start_pfn;
	return 0;
}
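/*
 * Collect the pages backing a user mapping: follow_pfn() for VM_IO /
 * VM_PFNMAP areas (which cannot be pinned), get_user_pages() for
 * ordinary anonymous or file-backed memory.
 */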
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			/*
			 * Check the return value before touching pfn: on
			 * failure follow_pfn() leaves it uninitialized.
			 */
			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			if (!pfn_valid(pfn))
				return -EINVAL;

			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}
static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (sgt) {
		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
		if (!vma_is_io(buf->vma))
			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_put_vma(buf->vma);
	kfree(buf);
}
/*
 * For some kinds of reserved memory there might be no struct page
 * available, so all that can be done to support such 'pages' is to try
 * to convert the pfn to a dma address or, as a last resort, assume that
 * dma address == physical address (as earlier versions of
 * videobuf2-dma-contig did).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
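/*
 * Pin a userspace buffer for DMA: resolve its pages (or raw pfns for
 * VM_IO mappings), build a scatterlist, DMA-map it and verify that the
 * resulting mapping is one contiguous chunk, since this allocator serves
 * devices without scatter/gather support.
 */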
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		unsigned long pfn;
		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
			buf->size = size;
			kfree(pages);
			return buf;
		}

		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}
/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}
static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}
static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}
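/*
 * Attaching only creates the dma_buf attachment; the actual mapping is
 * deferred to vb2_dc_map_dmabuf(), so an unused buffer does not tie up
 * device address space.
 */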
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
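/*
 * Usage sketch (illustrative only, not part of this file): a vb2 driver
 * of this era plugs these memops into its queue and creates one
 * allocation context per DMA device. Names such as my_pdev and my_queue
 * are hypothetical.
 *
 *	struct vb2_queue *q = &my_queue;
 *	void *alloc_ctx;
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *
 *	alloc_ctx = vb2_dma_contig_init_ctx(&my_pdev->dev);
 *	if (IS_ERR(alloc_ctx))
 *		return PTR_ERR(alloc_ctx);
 *	...
 *	vb2_dma_contig_cleanup_ctx(alloc_ctx);
 */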
void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");