/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

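/*
 * Return the size of the initial physically contiguous chunk of the mapped
 * scatterlist: walk the entries and stop at the first gap in DMA addresses.
 */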
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

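/*
 * Drop one reference to an MMAP buffer and, once the last user is gone, free
 * the exported sg table (if any), the coherent allocation and the buffer.
 */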
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
			  enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

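/*
 * Each attachment gets its own copy of the exporter's base sg table, since a
 * single sg table cannot be mapped for several attachments at the same time.
 */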
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

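/*
 * Build an sg table describing the pages backing the coherent allocation.
 * It is kept in buf->sgt_base and copied into every dma-buf attachment.
 */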
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

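/*
 * Release a USERPTR buffer: undo the DMA mapping, mark the pinned pages
 * dirty and release them, then free the bookkeeping structures.
 */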
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address, or as a last resort just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

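/*
 * Pin the userspace pages behind [vaddr, vaddr + size), map them for DMA and
 * check that the mapping is one contiguous chunk. If the pinned pfns have no
 * struct page (e.g. reserved memory), fall back to a direct pfn-to-dma
 * translation, which only works if the pfn range itself is contiguous.
 */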
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned long offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;

	offset = vaddr & ~PAGE_MASK;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, &attrs);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

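/*
 * Pin an attached dma-buf for DMA: ask the exporter for the scatterlist and
 * check that it is contiguous and large enough for the vb2 buffer.
 */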
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

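/*
 * Typical usage, sketched for illustration only: a driver creates an
 * allocator context at probe time, plugs vb2_dma_contig_memops into its vb2
 * queue and frees the context on remove. The names my_dev, my_queue and
 * alloc_ctx below are hypothetical, not part of this API.
 *
 *	my_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(my_dev->alloc_ctx))
 *		return PTR_ERR(my_dev->alloc_ctx);
 *
 *	my_dev->my_queue.mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(&my_dev->my_queue);
 *	...
 *	vb2_dma_contig_cleanup_ctx(my_dev->alloc_ctx);
 */
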
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");