// SPDX-License-Identifier: GPL-2.0-or-later
/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
 */

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/msp3400.h>
#include <media/tuner.h>

#include "cx231xx.h"
#include "cx231xx-vbi.h"

static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}
	if (packet < 0) {
		dev_err(dev->dev,
			"URB status %d [%s].\n", status, errmsg);
	} else {
		dev_err(dev->dev,
			"URB packet %d, status %d [%s].\n",
			packet, status, errmsg);
	}
}

/*
 * Controls the isoc copy of each urb packet
 */
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	int rc = 1;
	unsigned char *p_buffer;
	u32 bytes_parsed = 0, buffer_size = 0;
	u8 sav_eav = 0;

	if (!dev)
		return 0;

	if (dev->state & DEV_DISCONNECTED)
		return 0;

	if (urb->status < 0) {
		print_err_status(dev, -1, urb->status);
		if (urb->status == -ENOENT)
			return 0;
	}

	/* get buffer pointer and length */
	p_buffer = urb->transfer_buffer;
	buffer_size = urb->actual_length;

	if (buffer_size > 0) {
		bytes_parsed = 0;

		if (dma_q->is_partial_line) {
			/* Handle the case where we were working on a partial
			   line */
			sav_eav = dma_q->last_sav;
		} else {
			/* Check for a SAV/EAV overlapping the
			   buffer boundary */
			sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
							dma_q->partial_buf,
							&bytes_parsed);
		}

		/* Get the first line if we have some portion of an SAV/EAV from
		   the last buffer or a partial line */
		if (sav_eav) {
			bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
				sav_eav,			/* SAV/EAV */
				p_buffer + bytes_parsed,	/* p_buffer */
				buffer_size - bytes_parsed);	/* buffer size */
		}

		/* Now parse data that is completely in this buffer */
		dma_q->is_partial_line = 0;

		while (bytes_parsed < buffer_size) {
			u32 bytes_used = 0;

			sav_eav = cx231xx_find_next_SAV_EAV(
				p_buffer + bytes_parsed,	/* p_buffer */
				buffer_size - bytes_parsed,	/* buffer size */
				&bytes_used);	/* bytes used to get SAV/EAV */

			bytes_parsed += bytes_used;

			if (sav_eav && (bytes_parsed < buffer_size)) {
				bytes_parsed += cx231xx_get_vbi_line(dev,
					dma_q, sav_eav,	/* SAV/EAV */
					p_buffer + bytes_parsed, /* p_buffer */
					buffer_size - bytes_parsed);/*buf size*/
			}
		}

		/* Save the last four bytes of the buffer so we can
		   check the buffer boundary condition next time */
		memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
	}

	return rc;
}

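/*
 * Note on the parser above: SAV/EAV are the BT.656-style start/end-of-
 * active-video sync codes embedded in the byte stream. Because a sync
 * code (or a VBI line) can straddle two URB buffers, the last four
 * bytes of every buffer are stashed in dma_q->partial_buf and rescanned
 * via cx231xx_find_boundary_SAV_EAV() on the next completion.
 */
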
/* ------------------------------------------------------------------
	Vbi buf operations
   ------------------------------------------------------------------*/

static int vbi_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);

	*nplanes = 1;
	sizes[0] = (dev->width * height * 2 * 2);

	return 0;
}

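/*
 * Sizing note: each VBI sample is two bytes (dev->width << 1 per line,
 * as used in vbi_buf_prepare() below) and a frame carries two fields,
 * hence the width * height * 2 * 2 above.
 */
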
/* This is called *without* dev->slock held; please keep it that way */
static int vbi_buf_prepare(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	u32 height = 0;
	u32 size;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	size = ((dev->width << 1) * height * 2);

	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, size);

	return 0;
}

static void vbi_buf_queue(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	list_add_tail(&buf->list, &vidq->active);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

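/*
 * The active-buffer list is shared with the URB completion path, which
 * runs in interrupt context; every access is therefore guarded by
 * vbi_mode.slock, here and in cx231xx_irq_vbi_callback() below.
 */
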
static void return_all_buffers(struct cx231xx *dev,
			       enum vb2_buffer_state state)
{
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	struct cx231xx_buffer *buf, *node;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.buf = NULL;
	list_for_each_entry_safe(buf, node, &vidq->active, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	int ret;

	vidq->sequence = 0;
	ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
				    CX231XX_NUM_VBI_BUFS,
				    dev->vbi_mode.alt_max_pkt_size[0],
				    cx231xx_isoc_vbi_copy);
	if (ret)
		return_all_buffers(dev, VB2_BUF_STATE_QUEUED);

	return ret;
}

static void vbi_stop_streaming(struct vb2_queue *vq)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);

	return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}

struct vb2_ops cx231xx_vbi_qops = {
	.queue_setup = vbi_queue_setup,
	.buf_prepare = vbi_buf_prepare,
	.buf_queue = vbi_buf_queue,
	.start_streaming = vbi_start_streaming,
	.stop_streaming = vbi_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

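/*
 * The vb2 core drives these callbacks in the usual order: queue_setup
 * at buffer negotiation, buf_prepare/buf_queue per buffer,
 * start_streaming once enough buffers are queued, and stop_streaming on
 * teardown, which must hand back every buffer (done here via
 * return_all_buffers()).
 */
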
/* ------------------------------------------------------------------
	URB control
   ------------------------------------------------------------------*/

/*
 * IRQ callback, called by URB callback
 */
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	unsigned long flags;

	switch (urb->status) {
	case 0:			/* success */
	case -ETIMEDOUT:	/* NAK */
		break;
	case -ECONNRESET:	/* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:		/* error */
		dev_err(dev->dev,
			"urb completion error %d.\n", urb->status);
		break;
	}

	/* Copy data from URB */
	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

	urb->status = usb_submit_urb(urb, GFP_ATOMIC);
	if (urb->status) {
		dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
			urb->status);
	}
}

/*
 * Stop and Deallocate URBs
 */
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
	struct urb *urb;
	int i;

	dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");

	dev->vbi_mode.bulk_ctl.nfields = -1;
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = dev->vbi_mode.bulk_ctl.urb[i];
		if (urb) {
			if (!irqs_disabled())
				usb_kill_urb(urb);
			else
				usb_unlink_urb(urb);

			if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
				kfree(dev->vbi_mode.bulk_ctl.
				      transfer_buffer[i]);
				dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
				    NULL;
			}
			usb_free_urb(urb);
			dev->vbi_mode.bulk_ctl.urb[i] = NULL;
		}
		dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
	}

	kfree(dev->vbi_mode.bulk_ctl.urb);
	kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);

	dev->vbi_mode.bulk_ctl.urb = NULL;
	dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
	dev->vbi_mode.bulk_ctl.num_bufs = 0;

	cx231xx_capture_start(dev, 0, Vbi);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);

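/*
 * usb_kill_urb() may sleep, so it is only used above when IRQs are
 * enabled; with IRQs disabled the code falls back to the asynchronous
 * usb_unlink_urb() instead.
 */
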
/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*bulk_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	dev_dbg(dev->dev, "called cx231xx_vbi_isoc\n");

	/* De-allocates all pending stuff */
	cx231xx_uninit_vbi_isoc(dev);

	/* clear if any halt */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
				  PAL_VBI_LINES : NTSC_VBI_LINES);
	dma_q->lines_completed = 0;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *),
					     GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.urb) {
		dev_err(dev->dev,
			"cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.transfer_buffer =
	    kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
		dev_err(dev->dev,
			"cannot allocate memory for usbtransfer\n");
		kfree(dev->vbi_mode.bulk_ctl.urb);
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	dev->vbi_mode.bulk_ctl.buf = NULL;

	sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}
		dev->vbi_mode.bulk_ctl.urb[i] = urb;
		urb->transfer_flags = 0;

		dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			dev_err(dev->dev,
				"unable to allocate %i bytes for transfer buffer %i\n",
				sb_size, i);
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}

		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* submit urbs and enable IRQ */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			dev_err(dev->dev,
				"submit of urb %i failed (error=%i)\n", i, rc);
			cx231xx_uninit_vbi_isoc(dev);
			return rc;
		}
	}

	cx231xx_capture_start(dev, 1, Vbi);

	return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);

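/*
 * Naming caveat: despite the *_isoc names, the VBI endpoint is driven
 * with bulk URBs (usb_rcvbulkpipe()/usb_fill_bulk_urb() above),
 * matching the bulk_ctl bookkeeping structure.
 */
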
u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

	switch (sav_eav) {
	case SAV_VBI_FIELD1:
		current_field = 1;
		break;

	case SAV_VBI_FIELD2:
		current_field = 2;
		break;
	default:
		break;
	}

	if (current_field < 0)
		return bytes_copied;

	dma_q->last_sav = sav_eav;

	bytes_copied =
	    cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
				  current_field);

	return bytes_copied;
}

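/*
 * Only the SAV codes for the two VBI fields (SAV_VBI_FIELD1/2, from
 * cx231xx.h) start a copy; any other sync code leaves current_field at
 * -1 and the function reports zero bytes consumed.
 */
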
/*
 * Announces that a buffer was filled and requests the next
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* Advise that buffer was filled */
	/* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */
	buf->vb.sequence = dma_q->sequence++;
	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	dev->vbi_mode.bulk_ctl.buf = NULL;

	list_del(&buf->list);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			  u8 *p_line, u32 length, int field_number)
{
	u32 bytes_to_copy;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width * 2;

	if (dma_q->current_field == -1) {
		/* Just starting up */
		cx231xx_reset_vbi_buffer(dev, dma_q);
	}

	if (dma_q->current_field != field_number)
		dma_q->lines_completed = 0;

	/* get the buffer pointer */
	buf = dev->vbi_mode.bulk_ctl.buf;

	/* Remember the field number for next time */
	dma_q->current_field = field_number;

	bytes_to_copy = dma_q->bytes_left_in_line;
	if (bytes_to_copy > length)
		bytes_to_copy = length;

	if (dma_q->lines_completed >= dma_q->lines_per_field) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return 0;
	}

	dma_q->is_partial_line = 1;

	/* If we don't have a buffer, just return the number of bytes we would
	   have copied if we had a buffer. */
	if (!buf) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return bytes_to_copy;
	}

	/* copy the data to video buffer */
	cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

	dma_q->pos += bytes_to_copy;
	dma_q->bytes_left_in_line -= bytes_to_copy;

	if (dma_q->bytes_left_in_line == 0) {
		dma_q->bytes_left_in_line = _line_size;
		dma_q->lines_completed++;
		dma_q->is_partial_line = 0;

		if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
			vbi_buffer_filled(dev, dma_q, buf);

			dma_q->pos = 0;
			dma_q->lines_completed = 0;
			cx231xx_reset_vbi_buffer(dev, dma_q);
		}
	}

	return bytes_to_copy;
}

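/*
 * Worked example (hypothetical numbers): with dev->width = 720,
 * _line_size is 1440 bytes. If a line starts 400 bytes before the end
 * of one URB buffer, the first call copies 400 bytes and leaves
 * bytes_left_in_line = 1040 with is_partial_line set; the next URB's
 * data completes the line, bumps lines_completed and restores
 * bytes_left_in_line to 1440.
 */
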
/*
 * video-buf generic routine to get the next available buffer
 */
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
				    struct cx231xx_buffer **buf)
{
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	char *outp;

	if (list_empty(&dma_q->active)) {
		dev_err(dev->dev, "No active queue to serve\n");
		dev->vbi_mode.bulk_ctl.buf = NULL;
		*buf = NULL;
		return;
	}

	/* Get the next buffer */
	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);

	/* Cleans up buffer - Useful for testing for frame/URB loss */
	outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
	memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));

	dev->vbi_mode.bulk_ctl.buf = *buf;
}

void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	struct cx231xx_buffer *buf;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL) {
		/* first try to get the buffer */
		get_next_vbi_buf(dma_q, &buf);

		dma_q->pos = 0;
		dma_q->current_field = -1;
	}

	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_completed = 0;
}

int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			u8 *p_buffer, u32 bytes_to_copy)
{
	u8 *p_out_buffer = NULL;
	u32 current_line_bytes_copied = 0;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width << 1;
	void *startwrite;
	int offset, lencopy;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL)
		return -EINVAL;

	p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);

	if (dma_q->bytes_left_in_line != _line_size) {
		current_line_bytes_copied =
		    _line_size - dma_q->bytes_left_in_line;
	}

	offset = (dma_q->lines_completed * _line_size) +
		 current_line_bytes_copied;

	if (dma_q->current_field == 2) {
		/* Populate the second half of the frame */
		offset += (dev->width * 2 * dma_q->lines_per_field);
	}

	/* prepare destination address */
	startwrite = p_out_buffer + offset;

	lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
		  bytes_to_copy : dma_q->bytes_left_in_line;

	memcpy(startwrite, p_buffer, lencopy);

	return 0;
}

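/*
 * Layout note: lines are written linearly at lines_completed *
 * _line_size, and field 2 is offset by a full field
 * (width * 2 * lines_per_field), so the two fields occupy the first and
 * second halves of the plane.
 */
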
u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	if (dma_q->lines_completed == height && dma_q->current_field == 2)
		return 1;
	else
		return 0;
}