// SPDX-License-Identifier: GPL-2.0-or-later
/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
 */

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/msp3400.h>
#include <media/tuner.h>

#include "cx231xx.h"
#include "cx231xx-vbi.h"
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}
	if (packet < 0) {
		dev_err(dev->dev,
			"URB status %d [%s].\n", status, errmsg);
	} else {
		dev_err(dev->dev,
			"URB packet %d, status %d [%s].\n",
			packet, status, errmsg);
	}
}
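/*
 * A URB transfer buffer can begin or end in the middle of a VBI line, so the
 * parser below tracks SAV/EAV codes across buffer boundaries: the last four
 * bytes of every buffer are kept in dma_q->partial_buf and re-examined when
 * the next buffer arrives.
 */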
/*
 * Controls the isoc copy of each urb packet
 */
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	int rc = 1;
	unsigned char *p_buffer;
	u32 bytes_parsed = 0, buffer_size = 0;
	u8 sav_eav = 0;

	if (!dev)
		return 0;

	if (dev->state & DEV_DISCONNECTED)
		return 0;

	if (urb->status < 0) {
		print_err_status(dev, -1, urb->status);
		if (urb->status == -ENOENT)
			return 0;
	}

	/* get buffer pointer and length */
	p_buffer = urb->transfer_buffer;
	buffer_size = urb->actual_length;

	if (buffer_size > 0) {
		bytes_parsed = 0;

		if (dma_q->is_partial_line) {
			/* Handle the case where we were working on a partial
			   line */
			sav_eav = dma_q->last_sav;
		} else {
			/* Check for a SAV/EAV overlapping the buffer
			   boundary */
			sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
							dma_q->partial_buf,
							&bytes_parsed);
		}

		/* Get the first line if we have some portion of an SAV/EAV
		   from the last buffer or a partial line */
		if (sav_eav) {
			bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
				sav_eav,			/* SAV/EAV */
				p_buffer + bytes_parsed,	/* p_buffer */
				buffer_size - bytes_parsed);	/* buffer size */
		}

		/* Now parse data that is completely in this buffer */
		dma_q->is_partial_line = 0;

		while (bytes_parsed < buffer_size) {
			u32 bytes_used = 0;

			sav_eav = cx231xx_find_next_SAV_EAV(
				p_buffer + bytes_parsed,	/* p_buffer */
				buffer_size - bytes_parsed,	/* buffer size */
				&bytes_used);	/* bytes used to get SAV/EAV */

			bytes_parsed += bytes_used;

			if (sav_eav && (bytes_parsed < buffer_size)) {
				bytes_parsed += cx231xx_get_vbi_line(dev,
					dma_q, sav_eav,		 /* SAV/EAV */
					p_buffer + bytes_parsed, /* p_buffer */
					buffer_size - bytes_parsed); /* buf size */
			}
		}

		/* Save the last four bytes of the buffer so we can
		   check the buffer boundary condition next time */
		memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
		bytes_parsed = 0;
	}

	return rc;
}
/* ------------------------------------------------------------------
	Vbi buf operations
   ------------------------------------------------------------------*/
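/*
 * Each VBI buffer holds both fields of raw VBI data: lines_per_field lines
 * per field (PAL_VBI_LINES or NTSC_VBI_LINES depending on the standard) at
 * two bytes per pixel, hence the "dev->width * height * 2 * 2" sizing below.
 */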
static int vbi_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	u16 height;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);

	*nplanes = 1;
	sizes[0] = (dev->width * height * 2 * 2);
	return 0;
}
/* This is called *without* dev->slock held; please keep it that way */
static int vbi_buf_prepare(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	u32 height;
	u32 size;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	size = ((dev->width << 1) * height * 2);

	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, size);
	return 0;
}
static void vbi_buf_queue(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	list_add_tail(&buf->list, &vidq->active);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}
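/*
 * Hand every queued buffer back to videobuf2 in the given state; used when
 * streaming stops or fails to start.
 */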
static void return_all_buffers(struct cx231xx *dev,
			       enum vb2_buffer_state state)
{
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	struct cx231xx_buffer *buf, *node;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.buf = NULL;
	list_for_each_entry_safe(buf, node, &vidq->active, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}
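/*
 * Start the VBI transfer engine; if the URBs cannot be set up, the buffers
 * that userspace already queued are returned in the QUEUED state so vb2 can
 * hand them back cleanly.
 */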
static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	int ret;

	vidq->sequence = 0;
	ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
				    CX231XX_NUM_VBI_BUFS,
				    dev->vbi_mode.alt_max_pkt_size[0],
				    cx231xx_isoc_vbi_copy);
	if (ret)
		return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
	return ret;
}
static void vbi_stop_streaming(struct vb2_queue *vq)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);

	return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
struct vb2_ops cx231xx_vbi_qops = {
	.queue_setup = vbi_queue_setup,
	.buf_prepare = vbi_buf_prepare,
	.buf_queue = vbi_buf_queue,
	.start_streaming = vbi_start_streaming,
	.stop_streaming = vbi_stop_streaming,
};
/* ------------------------------------------------------------------
	URB control
   ------------------------------------------------------------------*/
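/*
 * On each URB completion the received data is parsed under vbi_mode.slock
 * (via the bulk_copy hook) and the URB is immediately resubmitted with
 * GFP_ATOMIC, so the transfer keeps running from interrupt context.
 */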
/*
 * IRQ callback, called by URB callback
 */
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	unsigned long flags;

	switch (urb->status) {
	case 0:			/* success */
	case -ETIMEDOUT:	/* NAK */
		break;
	case -ECONNRESET:	/* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:		/* error */
		dev_err(dev->dev,
			"urb completion error %d.\n", urb->status);
		break;
	}

	/* Copy data from URB */
	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

	urb->status = usb_submit_urb(urb, GFP_ATOMIC);
	if (urb->status) {
		dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
			urb->status);
	}
}
/*
 * Stop and Deallocate URBs
 */
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
	struct urb *urb;
	int i;

	dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");

	dev->vbi_mode.bulk_ctl.nfields = -1;
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = dev->vbi_mode.bulk_ctl.urb[i];
		if (urb) {
			if (!irqs_disabled())
				usb_kill_urb(urb);
			else
				usb_unlink_urb(urb);

			if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
				kfree(dev->vbi_mode.bulk_ctl.transfer_buffer[i]);
				dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
				    NULL;
			}
			usb_free_urb(urb);
			dev->vbi_mode.bulk_ctl.urb[i] = NULL;
		}
		dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
	}

	kfree(dev->vbi_mode.bulk_ctl.urb);
	kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);

	dev->vbi_mode.bulk_ctl.urb = NULL;
	dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
	dev->vbi_mode.bulk_ctl.num_bufs = 0;

	cx231xx_capture_start(dev, 0, Vbi);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);
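/*
 * Setup order: tear down any previous transfer state, clear a possible
 * endpoint halt, reset the DMA-queue line tracking, allocate one bulk URB
 * plus transfer buffer per num_bufs, submit them all and finally start the
 * VBI capture on the hardware.
 */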
/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*bulk_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	dev_dbg(dev->dev, "called cx231xx_vbi_isoc\n");

	/* De-allocates all pending stuff */
	cx231xx_uninit_vbi_isoc(dev);

	/* clear if any halt */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
				  PAL_VBI_LINES : NTSC_VBI_LINES);
	dma_q->lines_completed = 0;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *),
					     GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.urb) {
		dev_err(dev->dev,
			"cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.transfer_buffer =
	    kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
		dev_err(dev->dev,
			"cannot allocate memory for usb transfer\n");
		kfree(dev->vbi_mode.bulk_ctl.urb);
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	dev->vbi_mode.bulk_ctl.buf = NULL;

	sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}
		dev->vbi_mode.bulk_ctl.urb[i] = urb;
		urb->transfer_flags = 0;

		dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			dev_err(dev->dev,
				"unable to allocate %i bytes for transfer buffer %i\n",
				sb_size, i);
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}

		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* submit urbs and enable IRQ */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			dev_err(dev->dev,
				"submit of urb %i failed (error=%i)\n", i, rc);
			cx231xx_uninit_vbi_isoc(dev);
			return rc;
		}
	}

	cx231xx_capture_start(dev, 1, Vbi);

	return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);
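/*
 * Map the SAV code to field 1 or field 2 and hand the payload to
 * cx231xx_copy_vbi_line(); anything that is not a VBI SAV code is ignored.
 */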
u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

	switch (sav_eav) {
	case SAV_VBI_FIELD1:
		current_field = 1;
		break;
	case SAV_VBI_FIELD2:
		current_field = 2;
		break;
	default:
		break;
	}

	if (current_field < 0)
		return bytes_copied;

	dma_q->last_sav = sav_eav;

	bytes_copied =
	    cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
				  current_field);

	return bytes_copied;
}
/*
 * Announces that a buffer was filled and requests the next one
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* Advise that buffer was filled */
	/* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */

	buf->vb.sequence = dma_q->sequence++;
	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	dev->vbi_mode.bulk_ctl.buf = NULL;

	list_del(&buf->list);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
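/*
 * Copy as much of one VBI line as this chunk provides.
 * bytes_left_in_line / is_partial_line track progress through the current
 * line so a line split across URB buffers is reassembled, and the vb2 buffer
 * is completed once both fields have been filled.
 */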
u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			  u8 *p_line, u32 length, int field_number)
{
	u32 bytes_to_copy;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width * 2;

	if (dma_q->current_field == -1) {
		/* Just starting up */
		cx231xx_reset_vbi_buffer(dev, dma_q);
	}

	if (dma_q->current_field != field_number)
		dma_q->lines_completed = 0;

	/* get the buffer pointer */
	buf = dev->vbi_mode.bulk_ctl.buf;

	/* Remember the field number for next time */
	dma_q->current_field = field_number;

	bytes_to_copy = dma_q->bytes_left_in_line;
	if (bytes_to_copy > length)
		bytes_to_copy = length;

	if (dma_q->lines_completed >= dma_q->lines_per_field) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return bytes_to_copy;
	}

	dma_q->is_partial_line = 1;

	/* If we don't have a buffer, just return the number of bytes we would
	   have copied if we had a buffer. */
	if (!buf) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return bytes_to_copy;
	}

	/* copy the data to video buffer */
	cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

	dma_q->pos += bytes_to_copy;
	dma_q->bytes_left_in_line -= bytes_to_copy;

	if (dma_q->bytes_left_in_line == 0) {
		dma_q->bytes_left_in_line = _line_size;
		dma_q->lines_completed++;
		dma_q->is_partial_line = 0;

		if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
			vbi_buffer_filled(dev, dma_q, buf);

			dma_q->pos = 0;
			dma_q->lines_completed = 0;
			cx231xx_reset_vbi_buffer(dev, dma_q);
		}
	}

	return bytes_to_copy;
}
/*
 * generic routine to get the next available buffer
 */
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
				    struct cx231xx_buffer **buf)
{
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	char *outp;

	if (list_empty(&dma_q->active)) {
		dev_err(dev->dev, "No active queue to serve\n");
		dev->vbi_mode.bulk_ctl.buf = NULL;
		*buf = NULL;
		return;
	}

	/* Get the next buffer */
	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);

	/* Cleans up buffer - Useful for testing for frame/URB loss */
	outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
	memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));

	dev->vbi_mode.bulk_ctl.buf = *buf;
}
void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	struct cx231xx_buffer *buf;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL) {
		/* first try to get the buffer */
		get_next_vbi_buf(dma_q, &buf);

		dma_q->pos = 0;
		dma_q->current_field = -1;
	}

	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_completed = 0;
}
int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			u8 *p_buffer, u32 bytes_to_copy)
{
	u8 *p_out_buffer = NULL;
	u32 current_line_bytes_copied = 0;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width << 1;
	void *startwrite;
	int offset, lencopy;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL)
		return -EINVAL;

	p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);

	if (dma_q->bytes_left_in_line != _line_size) {
		current_line_bytes_copied =
		    _line_size - dma_q->bytes_left_in_line;
	}

	offset = (dma_q->lines_completed * _line_size) +
		 current_line_bytes_copied;

	if (dma_q->current_field == 2) {
		/* Populate the second half of the frame */
		offset += (dev->width * 2 * dma_q->lines_per_field);
	}

	/* prepare destination address */
	startwrite = p_out_buffer + offset;

	lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
		  bytes_to_copy : dma_q->bytes_left_in_line;

	memcpy(startwrite, p_buffer, lencopy);

	return 0;
}
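/*
 * A VBI buffer is complete once all lines of the second field have been
 * copied, i.e. lines_completed has reached the per-field line count while
 * field 2 is current.
 */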
u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	u32 height;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	if (dma_q->lines_completed == height && dma_q->current_field == 2)
		return 1;

	return 0;
}