/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>
25 module_param(debug
, int, 0644);
27 #define dprintk(level, fmt, arg...) \
30 printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
33 struct vb2_dma_sg_buf
{
38 struct sg_table sg_table
;
40 unsigned int num_pages
;
42 struct vb2_vmarea_handler handler
;
43 struct vm_area_struct
*vma
;
46 static void vb2_dma_sg_put(void *buf_priv
);
48 static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf
*buf
,
51 unsigned int last_page
= 0;
59 order
= get_order(size
);
60 /* Dont over allocate*/
61 if ((PAGE_SIZE
<< order
) > size
)
66 pages
= alloc_pages(GFP_KERNEL
| __GFP_ZERO
|
67 __GFP_NOWARN
| gfp_flags
, order
);
73 __free_page(buf
->pages
[last_page
]);
79 split_page(pages
, order
);
80 for (i
= 0; i
< (1 << order
); i
++)
81 buf
->pages
[last_page
++] = &pages
[i
];
83 size
-= PAGE_SIZE
<< order
;
89 static void *vb2_dma_sg_alloc(void *alloc_ctx
, unsigned long size
, gfp_t gfp_flags
)
91 struct vb2_dma_sg_buf
*buf
;
95 buf
= kzalloc(sizeof *buf
, GFP_KERNEL
);
103 /* size is already page aligned */
104 buf
->num_pages
= size
>> PAGE_SHIFT
;
106 buf
->pages
= kzalloc(buf
->num_pages
* sizeof(struct page
*),
109 goto fail_pages_array_alloc
;
111 ret
= vb2_dma_sg_alloc_compacted(buf
, gfp_flags
);
113 goto fail_pages_alloc
;
115 ret
= sg_alloc_table_from_pages(&buf
->sg_table
, buf
->pages
,
116 buf
->num_pages
, 0, size
, gfp_flags
);
118 goto fail_table_alloc
;
120 buf
->handler
.refcount
= &buf
->refcount
;
121 buf
->handler
.put
= vb2_dma_sg_put
;
122 buf
->handler
.arg
= buf
;
124 atomic_inc(&buf
->refcount
);
126 dprintk(1, "%s: Allocated buffer of %d pages\n",
127 __func__
, buf
->num_pages
);
131 num_pages
= buf
->num_pages
;
133 __free_page(buf
->pages
[num_pages
]);
136 fail_pages_array_alloc
:
141 static void vb2_dma_sg_put(void *buf_priv
)
143 struct vb2_dma_sg_buf
*buf
= buf_priv
;
144 int i
= buf
->num_pages
;
146 if (atomic_dec_and_test(&buf
->refcount
)) {
147 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__
,
150 vm_unmap_ram(buf
->vaddr
, buf
->num_pages
);
151 sg_free_table(&buf
->sg_table
);
153 __free_page(buf
->pages
[i
]);
159 static inline int vma_is_io(struct vm_area_struct
*vma
)
161 return !!(vma
->vm_flags
& (VM_IO
| VM_PFNMAP
));
164 static void *vb2_dma_sg_get_userptr(void *alloc_ctx
, unsigned long vaddr
,
165 unsigned long size
, int write
)
167 struct vb2_dma_sg_buf
*buf
;
168 unsigned long first
, last
;
169 int num_pages_from_user
;
170 struct vm_area_struct
*vma
;
172 buf
= kzalloc(sizeof *buf
, GFP_KERNEL
);
178 buf
->offset
= vaddr
& ~PAGE_MASK
;
181 first
= (vaddr
& PAGE_MASK
) >> PAGE_SHIFT
;
182 last
= ((vaddr
+ size
- 1) & PAGE_MASK
) >> PAGE_SHIFT
;
183 buf
->num_pages
= last
- first
+ 1;
185 buf
->pages
= kzalloc(buf
->num_pages
* sizeof(struct page
*),
188 goto userptr_fail_alloc_pages
;
190 vma
= find_vma(current
->mm
, vaddr
);
192 dprintk(1, "no vma for address %lu\n", vaddr
);
193 goto userptr_fail_find_vma
;
196 if (vma
->vm_end
< vaddr
+ size
) {
197 dprintk(1, "vma at %lu is too small for %lu bytes\n",
199 goto userptr_fail_find_vma
;
202 buf
->vma
= vb2_get_vma(vma
);
204 dprintk(1, "failed to copy vma\n");
205 goto userptr_fail_find_vma
;
208 if (vma_is_io(buf
->vma
)) {
209 for (num_pages_from_user
= 0;
210 num_pages_from_user
< buf
->num_pages
;
211 ++num_pages_from_user
, vaddr
+= PAGE_SIZE
) {
214 if (follow_pfn(buf
->vma
, vaddr
, &pfn
)) {
215 dprintk(1, "no page for address %lu\n", vaddr
);
218 buf
->pages
[num_pages_from_user
] = pfn_to_page(pfn
);
221 num_pages_from_user
= get_user_pages(current
, current
->mm
,
229 if (num_pages_from_user
!= buf
->num_pages
)
230 goto userptr_fail_get_user_pages
;
232 if (sg_alloc_table_from_pages(&buf
->sg_table
, buf
->pages
,
233 buf
->num_pages
, buf
->offset
, size
, 0))
234 goto userptr_fail_alloc_table_from_pages
;
238 userptr_fail_alloc_table_from_pages
:
239 userptr_fail_get_user_pages
:
240 dprintk(1, "get_user_pages requested/got: %d/%d]\n",
241 buf
->num_pages
, num_pages_from_user
);
242 if (!vma_is_io(buf
->vma
))
243 while (--num_pages_from_user
>= 0)
244 put_page(buf
->pages
[num_pages_from_user
]);
245 vb2_put_vma(buf
->vma
);
246 userptr_fail_find_vma
:
248 userptr_fail_alloc_pages
:
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
257 static void vb2_dma_sg_put_userptr(void *buf_priv
)
259 struct vb2_dma_sg_buf
*buf
= buf_priv
;
260 int i
= buf
->num_pages
;
262 dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
263 __func__
, buf
->num_pages
);
265 vm_unmap_ram(buf
->vaddr
, buf
->num_pages
);
266 sg_free_table(&buf
->sg_table
);
269 set_page_dirty_lock(buf
->pages
[i
]);
270 if (!vma_is_io(buf
->vma
))
271 put_page(buf
->pages
[i
]);
274 vb2_put_vma(buf
->vma
);
278 static void *vb2_dma_sg_vaddr(void *buf_priv
)
280 struct vb2_dma_sg_buf
*buf
= buf_priv
;
285 buf
->vaddr
= vm_map_ram(buf
->pages
,
290 /* add offset in case userptr is not page-aligned */
291 return buf
->vaddr
+ buf
->offset
;
294 static unsigned int vb2_dma_sg_num_users(void *buf_priv
)
296 struct vb2_dma_sg_buf
*buf
= buf_priv
;
298 return atomic_read(&buf
->refcount
);
301 static int vb2_dma_sg_mmap(void *buf_priv
, struct vm_area_struct
*vma
)
303 struct vb2_dma_sg_buf
*buf
= buf_priv
;
304 unsigned long uaddr
= vma
->vm_start
;
305 unsigned long usize
= vma
->vm_end
- vma
->vm_start
;
309 printk(KERN_ERR
"No memory to map\n");
316 ret
= vm_insert_page(vma
, uaddr
, buf
->pages
[i
++]);
318 printk(KERN_ERR
"Remapping memory, error: %d\n", ret
);
328 * Use common vm_area operations to track buffer refcount.
330 vma
->vm_private_data
= &buf
->handler
;
331 vma
->vm_ops
= &vb2_common_vm_ops
;
333 vma
->vm_ops
->open(vma
);
338 static void *vb2_dma_sg_cookie(void *buf_priv
)
340 struct vb2_dma_sg_buf
*buf
= buf_priv
;
342 return &buf
->sg_table
;
345 const struct vb2_mem_ops vb2_dma_sg_memops
= {
346 .alloc
= vb2_dma_sg_alloc
,
347 .put
= vb2_dma_sg_put
,
348 .get_userptr
= vb2_dma_sg_get_userptr
,
349 .put_userptr
= vb2_dma_sg_put_userptr
,
350 .vaddr
= vb2_dma_sg_vaddr
,
351 .mmap
= vb2_dma_sg_mmap
,
352 .num_users
= vb2_dma_sg_num_users
,
353 .cookie
= vb2_dma_sg_cookie
,
355 EXPORT_SYMBOL_GPL(vb2_dma_sg_memops
);
357 MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
358 MODULE_AUTHOR("Andrzej Pietrasiewicz");
359 MODULE_LICENSE("GPL");