/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

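/*
 * Per-buffer state; the field roles follow from how the functions below
 * use them:
 * @vaddr:	kernel virtual address of the buffer (vmalloc area, mapped
 *		user pages, or a dma-buf vmap, depending on origin)
 * @vec:	pinned user pages backing a USERPTR buffer
 * @dma_dir:	DMA transfer direction for this buffer
 * @size:	buffer size in bytes
 * @refcount:	references held by the driver and by userspace mappings
 * @handler:	common vm_area handler used to track mmap() lifetimes
 * @dbuf:	dma-buf this buffer was imported from, if any
 */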
struct vb2_vmalloc_buf {
        void                            *vaddr;
        struct frame_vector             *vec;
        enum dma_data_direction         dma_dir;
        unsigned long                   size;
        refcount_t                      refcount;
        struct vb2_vmarea_handler       handler;
        struct dma_buf                  *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
                               unsigned long size, enum dma_data_direction dma_dir,
                               gfp_t gfp_flags)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        buf->dma_dir = dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        if (!buf->vaddr) {
                pr_debug("vmalloc of size %ld failed\n", buf->size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        refcount_set(&buf->refcount, 1);
        return buf;
}

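/*
 * vb2_vmalloc_put() serves double duty: it is the .put memop, and via
 * buf->handler.put it is also the release callback run when the last
 * userspace mapping created by vb2_vmalloc_mmap() goes away. Either way
 * the buffer is freed only once the shared refcount drops to zero.
 */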
static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (refcount_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
                                     unsigned long size,
                                     enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;
        struct frame_vector *vec;
        int n_pages, offset, i;
        int ret = -ENOMEM;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dma_dir = dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
                                               dma_dir == DMA_BIDIRECTIONAL);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_pfnvec_create;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        if (frame_vector_to_pages(vec) < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * We cannot get page pointers for these pfns. Check memory is
                 * physically contiguous and use direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i - 1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
                        ioremap_nocache(nums[0] << PAGE_SHIFT, size);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
                                        PAGE_KERNEL);
        }

        if (!buf->vaddr)
                goto fail_map;
        buf->vaddr += offset;
        return buf;

fail_map:
        vb2_destroy_framevec(vec);
fail_pfnvec_create:
        kfree(buf);

        return ERR_PTR(ret);
}

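/*
 * Note the two mapping strategies above: memory with struct page backing
 * is stitched into the kernel address space with vm_map_ram(), while
 * pfn-only areas (for which frame_vector_to_pages() fails) are accepted
 * only when physically contiguous and are then mapped with
 * ioremap_nocache().
 */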
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;
        struct page **pages;
        unsigned int n_pages;

        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
                pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE ||
                    buf->dma_dir == DMA_BIDIRECTIONAL)
                        for (i = 0; i < n_pages; i++)
                                set_page_dirty_lock(pages[i]);
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }

        return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        return refcount_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory, error: %d\n", ret);
                return ret;
        }

        /*
         * Make sure that vm_areas for 2 buffers won't be merged together
         */
        vma->vm_flags |= VM_DONTEXPAND;

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

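/*
 * vb2_common_vm_ops (from videobuf2-memops) takes a reference on the
 * handler's refcount whenever a VMA is opened and calls handler.put when
 * it is closed; the explicit vm_ops->open() above accounts for the
 * mapping that was just created.
 */
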
#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

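/*
 * Each attachment carries its own scatterlist of the buffer's pages plus
 * the direction it is currently mapped in; DMA_NONE marks an attachment
 * whose table has been built but not yet mapped to the attaching device.
 */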
struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .map = vb2_vmalloc_dmabuf_ops_kmap,
        .map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

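/*
 * vb2_vmalloc_get_dmabuf() below backs buffer export (VIDIOC_EXPBUF): the
 * vb2 core reaches it through vb2_mem_ops.get_dmabuf to wrap a vmalloc
 * buffer in a dma-buf that uses the exporter ops above.
 */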
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_vmalloc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}

#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

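/*
 * These callbacks implement the importer side: attach_dmabuf/detach_dmabuf
 * handle the bookkeeping for a foreign dma-buf, while map_dmabuf and
 * unmap_dmabuf obtain and release a kernel mapping of it via
 * dma_buf_vmap()/dma_buf_vunmap().
 */
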
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        buf->vaddr = dma_buf_vmap(buf->dbuf);

        return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        dma_buf_vunmap(buf->dbuf, buf->vaddr);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        if (buf->vaddr)
                dma_buf_vunmap(buf->dbuf, buf->vaddr);

        kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = dma_dir;
        buf->size = size;

        return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc          = vb2_vmalloc_alloc,
        .put            = vb2_vmalloc_put,
        .get_userptr    = vb2_vmalloc_get_userptr,
        .put_userptr    = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
        .get_dmabuf     = vb2_vmalloc_get_dmabuf,
#endif
        .map_dmabuf     = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf   = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf  = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf  = vb2_vmalloc_detach_dmabuf,
        .vaddr          = vb2_vmalloc_vaddr,
        .mmap           = vb2_vmalloc_mmap,
        .num_users      = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");