/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>
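/*
 * Per-buffer bookkeeping: the kernel mapping, an optional pinned user-page
 * vector, the DMA direction, and the refcount shared with the mmap handler.
 */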
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};
static void vb2_vmalloc_put(void *buf_priv);
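/*
 * MMAP mode: back the buffer with vmalloc_user() memory and hand out a
 * refcounted handle that vb2_vmalloc_put() releases.
 */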
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}
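/* Drop one reference; free the vmalloc area once the last user is gone. */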
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}
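/*
 * USERPTR mode: pin the user pages through a frame vector and map them into
 * the kernel with vm_map_ram(); if the range has no struct pages, fall back
 * to ioremap() of the (physically contiguous) PFN range.
 */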
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}
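/*
 * Undo vb2_vmalloc_get_userptr(): unmap the kernel mapping, mark pages dirty
 * for device-to-CPU directions, and release the frame vector.
 */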
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
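/* Return the kernel virtual address of the plane, if one exists. */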
static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}
static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}
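/*
 * Map the vmalloc'ed buffer into userspace and let vb2_common_vm_ops track
 * the mapping through the buffer refcount.
 */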
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}
#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};
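/*
 * Build a scatterlist describing the vmalloc pages for this attachment;
 * it is mapped lazily in the map callback.
 */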
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}
static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
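/*
 * Map the cached scatterlist for the importing device, reusing a previous
 * mapping when the requested DMA direction has not changed.
 */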
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}
static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}
static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}
static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}
static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}
static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};
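/* Export the buffer as a dma-buf; the dma-buf holds its own reference. */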
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */
/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
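/* Importer side: the buffer is accessed through dma_buf_vmap()/vunmap(). */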
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}
static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}
static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}
static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}
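/*
 * Illustrative sketch (not part of this file): a driver whose hardware can
 * work from any kernel-virtual buffer typically selects this allocator in
 * its vb2_queue setup, roughly as follows (names from videobuf2-core.h,
 * driver-specific callbacks omitted):
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */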
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");