// SPDX-License-Identifier: GPL-2.0-only
/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
	u32 magic;
	void *vaddr;
	dma_addr_t dma_handle;
	unsigned long size;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)						    \
	if (unlikely((is) != (should))) {				    \
		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
		BUG();							    \
	}

static int __videobuf_dc_alloc(struct device *dev,
			       struct videobuf_dma_contig_memory *mem,
			       unsigned long size, gfp_t flags)
{
	mem->size = size;
	mem->vaddr = dma_alloc_coherent(dev, mem->size,
					&mem->dma_handle, flags);
	if (!mem->vaddr) {
		dev_err(dev, "memory alloc size %ld failed\n", mem->size);
		return -ENOMEM;
	}

	dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);

	return 0;
}

static void __videobuf_dc_free(struct device *dev,
			       struct videobuf_dma_contig_memory *mem)
{
	dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);

	mem->vaddr = NULL;
}

static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}

static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);
				__videobuf_dc_free(q->dev, mem);
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}

static const struct vm_operations_struct videobuf_vm_ops = {
	.open	= videobuf_vm_open,
	.close	= videobuf_vm_close,
};

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->dma_handle = 0;
	mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
					struct videobuf_buffer *vb)
{
	unsigned long untagged_baddr = untagged_addr(vb->baddr);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long prev_pfn, this_pfn;
	unsigned long pages_done, user_address;
	unsigned int offset;
	int ret;

	offset = untagged_baddr & ~PAGE_MASK;
	mem->size = PAGE_ALIGN(vb->size + offset);
	ret = -EINVAL;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, untagged_baddr);
	if (!vma)
		goto out_up;

	if ((untagged_baddr + mem->size) > vma->vm_end)
		goto out_up;

	pages_done = 0;
	prev_pfn = 0; /* kill warning */
	user_address = untagged_baddr;

	while (pages_done < (mem->size >> PAGE_SHIFT)) {
		ret = follow_pfn(vma, user_address, &this_pfn);
		if (ret)
			break;

		if (pages_done == 0)
			mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
		else if (this_pfn != (prev_pfn + 1))
			ret = -EFAULT;

		if (ret)
			break;

		prev_pfn = this_pfn;
		user_address += PAGE_SIZE;
		pages_done++;
	}

out_up:
	up_read(&current->mm->mmap_sem);

	return ret;
}

static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb) {
		vb->priv = ((char *)vb) + size;
		mem = vb->priv;
		mem->magic = MAGIC_DC_MEM;
	}

	return vb;
}

static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}

static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not allocated/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
					GFP_KERNEL))
			return -ENOMEM;
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
		return -EINVAL;
	}

	return 0;
}

static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	int retval;

	dev_dbg(q->dev, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	buf->map = map;
	map->q = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
				GFP_KERNEL | __GFP_COMP))
		goto error;

	/* Try to remap memory */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* the "vm_pgoff" is just used in v4l2 to find the
	 * corresponding buffer data structure which is allocated
	 * earlier and it does not mean the offset from the physical
	 * buffer start address as usual. So set it to 0 to pass
	 * the sanity check in vm_iomap_memory().
	 */
	vma->vm_pgoff = 0;

	retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d. ",
			retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize, vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);

	return 0;

error:
	kfree(map);
	return -ENOMEM;
}

static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,
	.alloc_vb	= __videobuf_alloc,
	.iolock		= __videobuf_iolock,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};

void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);

void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise mmapped region
	   would be released, while still needed. In this case, the memory
	   release should happen inside videobuf_vm_close().
	   So, it should free memory only if the memory were allocated for
	   read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	if (mem->vaddr)
		__videobuf_dc_free(q->dev, mem);
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");