// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Samsung Electronics
 *
 * Author: jh1009.sung@samsung.com
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <media/dvbdev.h>
#include <media/dvb_vb2.h>

#define DVB_V2_MAX_SIZE         (4096 * 188)

static int vb2_debug;
module_param(vb2_debug, int, 0644);

#define dprintk(level, fmt, arg...)                                          \
        do {                                                                  \
                if (vb2_debug >= level)                                       \
                        pr_info("vb2: %s: " fmt, __func__, ## arg);           \
        } while (0)
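
/*
 * Debug levels used by the dprintk() calls below, as inferred from the call
 * sites: level 1 for error paths, level 3 for general tracing, level 5 for
 * per-buffer queue/dequeue events.
 */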

static int _queue_setup(struct vb2_queue *vq,
                        unsigned int *nbuffers, unsigned int *nplanes,
                        unsigned int sizes[], struct device *alloc_devs[])
{
        struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

        ctx->buf_cnt = *nbuffers;
        *nplanes = 1;
        sizes[0] = ctx->buf_siz;

        /*
         * videobuf2-vmalloc allocator is context-less so no need to set
         * the alloc_devs array.
         */

        dprintk(3, "[%s] count=%d, size=%d\n", ctx->name,
                *nbuffers, sizes[0]);

        return 0;
}

static int _buffer_prepare(struct vb2_buffer *vb)
{
        struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        unsigned long size = ctx->buf_siz;

        if (vb2_plane_size(vb, 0) < size) {
                dprintk(1, "[%s] data will not fit into plane (%lu < %lu)\n",
                        ctx->name, vb2_plane_size(vb, 0), size);
                return -EINVAL;
        }

        vb2_set_plane_payload(vb, 0, size);
        dprintk(3, "[%s]\n", ctx->name);

        return 0;
}

static void _buffer_queue(struct vb2_buffer *vb)
{
        struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct dvb_buffer *buf = container_of(vb, struct dvb_buffer, vb);
        unsigned long flags = 0;

        spin_lock_irqsave(&ctx->slock, flags);
        list_add_tail(&buf->list, &ctx->dvb_q);
        spin_unlock_irqrestore(&ctx->slock, flags);

        dprintk(3, "[%s]\n", ctx->name);
}
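
/*
 * Buffers queued by userspace land on ctx->dvb_q here; dvb_vb2_fill_buffer()
 * below pops them off that list as demux data arrives.
 */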

static int _start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

        dprintk(3, "[%s] count=%d\n", ctx->name, count);
        return 0;
}

static void _stop_streaming(struct vb2_queue *vq)
{
        struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);
        struct dvb_buffer *buf;
        unsigned long flags = 0;

        dprintk(3, "[%s]\n", ctx->name);

        spin_lock_irqsave(&ctx->slock, flags);
        while (!list_empty(&ctx->dvb_q)) {
                buf = list_entry(ctx->dvb_q.next,
                                 struct dvb_buffer, list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
                list_del(&buf->list);
        }
        spin_unlock_irqrestore(&ctx->slock, flags);
}

static void _dmxdev_lock(struct vb2_queue *vq)
{
        struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

        mutex_lock(&ctx->mutex);
        dprintk(3, "[%s]\n", ctx->name);
}

static void _dmxdev_unlock(struct vb2_queue *vq)
{
        struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

        if (mutex_is_locked(&ctx->mutex))
                mutex_unlock(&ctx->mutex);
        dprintk(3, "[%s]\n", ctx->name);
}
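
/*
 * vb2 calls .wait_prepare before it sleeps waiting for a buffer and
 * .wait_finish when it wakes up again; wiring them to _dmxdev_unlock() and
 * _dmxdev_lock() drops the dmxdev mutex across that sleep.
 */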

static const struct vb2_ops dvb_vb2_qops = {
        .queue_setup            = _queue_setup,
        .buf_prepare            = _buffer_prepare,
        .buf_queue              = _buffer_queue,
        .start_streaming        = _start_streaming,
        .stop_streaming         = _stop_streaming,
        .wait_prepare           = _dmxdev_unlock,
        .wait_finish            = _dmxdev_lock,
};

static void _fill_dmx_buffer(struct vb2_buffer *vb, void *pb)
{
        struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct dmx_buffer *b = pb;

        b->index = vb->index;
        b->length = vb->planes[0].length;
        b->bytesused = vb->planes[0].bytesused;
        b->offset = vb->planes[0].m.offset;
        dprintk(3, "[%s]\n", ctx->name);
}

static int _fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
        struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

        planes[0].bytesused = 0;
        dprintk(3, "[%s]\n", ctx->name);

        return 0;
}
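
/*
 * .fill_user_buffer copies the vb2 buffer state (index, length, bytesused,
 * mmap offset) into the struct dmx_buffer handed back to userspace, while
 * .fill_vb2_buffer only clears bytesused on queueing: the payload itself is
 * written on the kernel side by dvb_vb2_fill_buffer().
 */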

static const struct vb2_buf_ops dvb_vb2_buf_ops = {
        .fill_user_buffer       = _fill_dmx_buffer,
        .fill_vb2_buffer        = _fill_vb2_buffer,
};

/*
 * Videobuf operations
 */
int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int nonblocking)
{
        struct vb2_queue *q = &ctx->vb_q;
        int ret;

        memset(ctx, 0, sizeof(struct dvb_vb2_ctx));
        q->type = DVB_BUF_TYPE_CAPTURE;
        /* only mmap is supported currently */
        q->io_modes = VB2_MMAP;
        q->drv_priv = ctx;      /* used by vb2_get_drv_priv() in the ops above */
        q->buf_struct_size = sizeof(struct dvb_buffer);
        q->min_queued_buffers = 1;
        q->ops = &dvb_vb2_qops;
        q->mem_ops = &vb2_vmalloc_memops;
        q->buf_ops = &dvb_vb2_buf_ops;
        ret = vb2_core_queue_init(q);
        if (ret) {
                ctx->state = DVB_VB2_STATE_NONE;
                dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
                return ret;
        }

        mutex_init(&ctx->mutex);
        spin_lock_init(&ctx->slock);
        INIT_LIST_HEAD(&ctx->dvb_q);

        strscpy(ctx->name, name, DVB_VB2_NAME_MAX);
        ctx->nonblocking = nonblocking;
        ctx->state = DVB_VB2_STATE_INIT;

        dprintk(3, "[%s]\n", ctx->name);

        return 0;
}

int dvb_vb2_release(struct dvb_vb2_ctx *ctx)
{
        struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;

        if (ctx->state & DVB_VB2_STATE_INIT)
                vb2_core_queue_release(q);

        ctx->state = DVB_VB2_STATE_NONE;
        dprintk(3, "[%s]\n", ctx->name);

        return 0;
}

int dvb_vb2_stream_on(struct dvb_vb2_ctx *ctx)
{
        struct vb2_queue *q = &ctx->vb_q;
        int ret;

        ret = vb2_core_streamon(q, q->type);
        if (ret) {
                ctx->state = DVB_VB2_STATE_NONE;
                dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
                return ret;
        }
        ctx->state |= DVB_VB2_STATE_STREAMON;
        dprintk(3, "[%s]\n", ctx->name);

        return 0;
}

int dvb_vb2_stream_off(struct dvb_vb2_ctx *ctx)
{
        struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
        int ret;

        ctx->state &= ~DVB_VB2_STATE_STREAMON;
        ret = vb2_core_streamoff(q, q->type);
        if (ret) {
                ctx->state = DVB_VB2_STATE_NONE;
                dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
                return ret;
        }
        dprintk(3, "[%s]\n", ctx->name);

        return 0;
}

int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx)
{
        return (ctx->state & DVB_VB2_STATE_STREAMON);
}
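
/*
 * Copy demux output into the vb2 buffers queued on ctx->dvb_q. Data is
 * written sequentially into the buffer at the head of the list; a full
 * buffer is completed and the next one picked up, and in nonblocking mode a
 * partially filled buffer is also completed once the copy loop stops.
 * Returns the number of bytes actually copied.
 */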
int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
                        const unsigned char *src, int len,
                        enum dmx_buffer_flags *buffer_flags)
{
        unsigned long flags = 0;
        void *vbuf = NULL;
        int todo = len;
        unsigned char *psrc = (unsigned char *)src;
        int ll = 0;

        /*
         * normal case: This func is called twice from demux driver,
         * one with valid src pointer, second time with NULL pointer
         */
        if (!src || !len)
                return 0;
        spin_lock_irqsave(&ctx->slock, flags);
        if (buffer_flags && *buffer_flags) {
                ctx->flags |= *buffer_flags;
                *buffer_flags = 0;
        }
        while (todo) {
                if (!ctx->buf) {
                        if (list_empty(&ctx->dvb_q)) {
                                dprintk(3, "[%s] Buffer overflow!!!\n",
                                        ctx->name);
                                break;
                        }

                        ctx->buf = list_entry(ctx->dvb_q.next,
                                              struct dvb_buffer, list);
                        ctx->remain = vb2_plane_size(&ctx->buf->vb, 0);
                        ctx->offset = 0;
                }

                if (!dvb_vb2_is_streaming(ctx)) {
                        vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_ERROR);
                        list_del(&ctx->buf->list);
                        ctx->buf = NULL;
                        break;
                }

                /* Fill buffer */
                ll = min(todo, ctx->remain);
                vbuf = vb2_plane_vaddr(&ctx->buf->vb, 0);
                memcpy(vbuf + ctx->offset, psrc, ll);
                todo -= ll;
                psrc += ll;

                ctx->remain -= ll;
                ctx->offset += ll;

                if (ctx->remain == 0) {
                        vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_DONE);
                        list_del(&ctx->buf->list);
                        ctx->buf = NULL;
                }
        }

        if (ctx->nonblocking && ctx->buf) {
                vb2_set_plane_payload(&ctx->buf->vb, 0, ll);
                vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_DONE);
                list_del(&ctx->buf->list);
                ctx->buf = NULL;
        }
        spin_unlock_irqrestore(&ctx->slock, flags);

        if (todo)
                dprintk(1, "[%s] %d bytes are dropped.\n", ctx->name, todo);
        else
                dprintk(3, "[%s]\n", ctx->name);
        dprintk(3, "[%s] %d bytes are copied\n", ctx->name, len - todo);

        return (len - todo);
}

int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
{
        int ret;

        /* Adjust size to a sane value */
        if (req->size > DVB_V2_MAX_SIZE)
                req->size = DVB_V2_MAX_SIZE;

        /* FIXME: round req->size to a 188 or 204 multiple */

        ctx->buf_siz = req->size;
        ctx->buf_cnt = req->count;
        ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, 0, &req->count);
        if (ret) {
                ctx->state = DVB_VB2_STATE_NONE;
                dprintk(1, "[%s] count=%d size=%d errno=%d\n", ctx->name,
                        ctx->buf_cnt, ctx->buf_siz, ret);
                return ret;
        }
        ctx->state |= DVB_VB2_STATE_REQBUFS;
        dprintk(3, "[%s] count=%d size=%d\n", ctx->name,
                ctx->buf_cnt, ctx->buf_siz);

        return 0;
}

int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
        struct vb2_queue *q = &ctx->vb_q;
        struct vb2_buffer *vb2 = vb2_get_buffer(q, b->index);

        if (!vb2) {
                dprintk(1, "[%s] invalid buffer index\n", ctx->name);
                return -EINVAL;
        }
        vb2_core_querybuf(&ctx->vb_q, vb2, b);
        dprintk(3, "[%s] index=%d\n", ctx->name, b->index);

        return 0;
}

int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
{
        struct vb2_queue *q = &ctx->vb_q;
        struct vb2_buffer *vb2 = vb2_get_buffer(q, exp->index);
        int ret;

        if (!vb2) {
                dprintk(1, "[%s] invalid buffer index\n", ctx->name);
                return -EINVAL;
        }

        ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, vb2,
                              0, exp->flags);
        if (ret) {
                dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
                        exp->index, ret);
                return ret;
        }
        dprintk(3, "[%s] index=%d fd=%d\n", ctx->name, exp->index, exp->fd);

        return 0;
}

int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
        struct vb2_queue *q = &ctx->vb_q;
        struct vb2_buffer *vb2 = vb2_get_buffer(q, b->index);
        int ret;

        if (!vb2) {
                dprintk(1, "[%s] invalid buffer index\n", ctx->name);
                return -EINVAL;
        }
        ret = vb2_core_qbuf(&ctx->vb_q, vb2, b, NULL);
        if (ret) {
                dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
                        b->index, ret);
                return ret;
        }
        dprintk(5, "[%s] index=%d\n", ctx->name, b->index);

        return 0;
}

int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
        unsigned long flags;
        int ret;

        ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking);
        if (ret) {
                dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
                return ret;
        }

        spin_lock_irqsave(&ctx->slock, flags);
        b->count = ctx->count++;
        b->flags = ctx->flags;
        ctx->flags = 0;
        spin_unlock_irqrestore(&ctx->slock, flags);

        dprintk(5, "[%s] index=%d, count=%d, flags=%d\n",
                ctx->name, b->index, ctx->count, b->flags);

        return 0;
}

int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma)
{
        int ret;

        ret = vb2_mmap(&ctx->vb_q, vma);
        if (ret) {
                dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
                return ret;
        }
        dprintk(3, "[%s] ret=%d\n", ctx->name, ret);

        return 0;
}

__poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, struct file *file,
                      poll_table *wait)
{
        dprintk(3, "[%s]\n", ctx->name);
        return vb2_core_poll(&ctx->vb_q, file, wait);
}
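
/*
 * Rough sketch of how the demux device node is expected to drive this API
 * (names of the local request/buffer variables are illustrative, not part of
 * this file):
 *
 *      dvb_vb2_init(ctx, name, nonblocking);
 *      dvb_vb2_reqbufs(ctx, &req);         (DMX_REQBUFS)
 *      dvb_vb2_qbuf(ctx, &b);              (DMX_QBUF, once per buffer)
 *      dvb_vb2_stream_on(ctx);
 *      dvb_vb2_fill_buffer(ctx, src, len, &flags);  (called by the demux)
 *      dvb_vb2_dqbuf(ctx, &b);             (DMX_DQBUF)
 *      dvb_vb2_stream_off(ctx);
 *      dvb_vb2_release(ctx);
 */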