/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/msp3400.h>
#include <media/tuner.h>

#include "cx231xx-vbi.h"
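
/*
 * VBI capture overview (summary of the code below):
 *  - cx231xx_init_vbi_isoc() allocates the transfer buffers and URBs,
 *    primes the DMA queue state and starts the hardware.  Despite the
 *    "isoc" naming, the VBI endpoint is driven with bulk URBs here.
 *  - Each URB completion lands in cx231xx_irq_vbi_callback(), which calls
 *    the registered bulk_copy handler (cx231xx_isoc_vbi_copy) under
 *    vbi_mode.slock and then resubmits the URB.
 *  - The copy handler scans the transfer buffer for SAV/EAV codes and
 *    feeds whole or partial VBI lines into the current videobuf buffer.
 */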

static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}
	if (packet < 0) {
		dev_err(dev->dev,
			"URB status %d [%s].\n", status, errmsg);
	} else {
		dev_err(dev->dev,
			"URB packet %d, status %d [%s].\n",
			packet, status, errmsg);
	}
}

/*
 * Controls the isoc copy of each urb packet
 */
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	int rc = 1;
	unsigned char *p_buffer;
	u32 bytes_parsed = 0, buffer_size = 0;
	u8 sav_eav = 0;

	if (!dev)
		return 0;

	if (dev->state & DEV_DISCONNECTED)
		return 0;

	if (urb->status < 0) {
		print_err_status(dev, -1, urb->status);
		if (urb->status == -ENOENT)
			return 0;
	}

	/* get buffer pointer and length */
	p_buffer = urb->transfer_buffer;
	buffer_size = urb->actual_length;

	if (buffer_size > 0) {
		if (dma_q->is_partial_line) {
			/* Handle the case where we were working on a partial
			   line */
			sav_eav = dma_q->last_sav;
		} else {
			/* Check for a SAV/EAV overlapping the
			   buffer boundary */
			sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
							dma_q->partial_buf,
							&bytes_parsed);
		}

		/* Get the first line if we have some portion of an SAV/EAV from
		   the last buffer or a partial line */
		if (sav_eav) {
			bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
				sav_eav,			/* SAV/EAV */
				p_buffer + bytes_parsed,	/* p_buffer */
				buffer_size - bytes_parsed);	/* buffer size */
		}

		/* Now parse data that is completely in this buffer */
		dma_q->is_partial_line = 0;

		while (bytes_parsed < buffer_size) {
			u32 bytes_used = 0;

			sav_eav = cx231xx_find_next_SAV_EAV(
				p_buffer + bytes_parsed,	/* p_buffer */
				buffer_size - bytes_parsed,	/* buffer size */
				&bytes_used);	/* bytes used to get SAV/EAV */

			bytes_parsed += bytes_used;

			if (sav_eav && (bytes_parsed < buffer_size)) {
				bytes_parsed += cx231xx_get_vbi_line(dev,
					dma_q, sav_eav,		 /* SAV/EAV */
					p_buffer + bytes_parsed, /* p_buffer */
					buffer_size - bytes_parsed);/*buf size*/
			}
		}

		/* Save the last four bytes of the buffer so we can
		   check the buffer boundary condition next time */
		memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
	}

	return rc;
}
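
/*
 * Note on buffer boundaries: the last four bytes of every URB are saved in
 * dma_q->partial_buf so that a SAV/EAV code split across two URBs can still
 * be recognized by cx231xx_find_boundary_SAV_EAV() on the next pass, while a
 * partially parsed line is resumed via dma_q->last_sav/is_partial_line.
 */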

/* ------------------------------------------------------------------
	Vbi buf operations
   ------------------------------------------------------------------*/

static int
vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
		 unsigned int *size)
{
	struct cx231xx_fh *fh = vq->priv_data;
	struct cx231xx *dev = fh->dev;
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);

	*size = (dev->width * height * 2 * 2);
	if (0 == *count)
		*count = CX231XX_DEF_VBI_BUF;

	if (*count < CX231XX_MIN_BUF)
		*count = CX231XX_MIN_BUF;

	return 0;
}
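
/*
 * Sizing example: for a PAL source (V4L2_STD_625_50) the per-buffer size is
 * dev->width * PAL_VBI_LINES * 2 * 2 bytes, i.e. two bytes per sample and
 * two fields per frame; NTSC uses NTSC_VBI_LINES instead.  The same formula
 * is used for buf->vb.size in vbi_buffer_prepare() below.
 */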

/* This is called *without* dev->slock held; please keep it that way */
static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
{
	struct cx231xx_fh *fh = vq->priv_data;
	struct cx231xx *dev = fh->dev;
	unsigned long flags = 0;

	BUG_ON(in_interrupt());

	/* We used to wait for the buffer to finish here, but this didn't work
	   because, as we were keeping the state as VIDEOBUF_QUEUED,
	   videobuf_queue_cancel marked it as finished for us.
	   (Also, it could wedge forever if the hardware was misconfigured.)

	   This should be safe; by the time we get here, the buffer isn't
	   queued anymore. If we ever start marking the buffers as
	   VIDEOBUF_ACTIVE, it won't be, though.
	 */
	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	if (dev->vbi_mode.bulk_ctl.buf == buf)
		dev->vbi_mode.bulk_ctl.buf = NULL;
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

	videobuf_vmalloc_free(&buf->vb);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}

static int
vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
		   enum v4l2_field field)
{
	struct cx231xx_fh *fh = vq->priv_data;
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb);
	struct cx231xx *dev = fh->dev;
	int rc = 0, urb_init = 0;
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	buf->vb.size = ((dev->width << 1) * height * 2);

	if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
		return -EINVAL;

	buf->vb.width = dev->width;
	buf->vb.height = height;
	buf->vb.field = field;
	buf->vb.field = V4L2_FIELD_SEQ_TB;

	if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
		rc = videobuf_iolock(vq, &buf->vb, NULL);
		if (rc < 0)
			goto fail;
	}

	if (!dev->vbi_mode.bulk_ctl.num_bufs)
		urb_init = 1;

	if (urb_init) {
		rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
					   CX231XX_NUM_VBI_BUFS,
					   dev->vbi_mode.alt_max_pkt_size[0],
					   cx231xx_isoc_vbi_copy);
		if (rc < 0)
			goto fail;
	}

	buf->vb.state = VIDEOBUF_PREPARED;
	return 0;

fail:
	free_buffer(vq, buf);
	return rc;
}
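
/*
 * The URB machinery is initialized lazily: the first buffer prepared while
 * bulk_ctl.num_bufs is still zero triggers cx231xx_init_vbi_isoc() with
 * CX231XX_NUM_VBI_PACKETS/CX231XX_NUM_VBI_BUFS and the endpoint's
 * alt_max_pkt_size[0]; on any failure the buffer is released again.
 */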

static void
vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb);
	struct cx231xx_fh *fh = vq->priv_data;
	struct cx231xx *dev = fh->dev;
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;

	buf->vb.state = VIDEOBUF_QUEUED;
	list_add_tail(&buf->vb.queue, &vidq->active);
}

static void vbi_buffer_release(struct videobuf_queue *vq,
			       struct videobuf_buffer *vb)
{
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb);

	free_buffer(vq, buf);
}

const struct videobuf_queue_ops cx231xx_vbi_qops = {
	.buf_setup   = vbi_buffer_setup,
	.buf_prepare = vbi_buffer_prepare,
	.buf_queue   = vbi_buffer_queue,
	.buf_release = vbi_buffer_release,
};
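
/*
 * These ops are handed to the videobuf core when the VBI queue is set up
 * elsewhere in the driver (typically via videobuf_queue_vmalloc_init());
 * the exact registration site is outside this file, so treat that as an
 * assumption here.
 */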

/* ------------------------------------------------------------------
	URB control
   ------------------------------------------------------------------*/

/*
 * IRQ callback, called by URB callback
 */
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);

	switch (urb->status) {
	case 0:			/* success */
	case -ETIMEDOUT:	/* NAK */
		break;
	case -ECONNRESET:	/* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:		/* error */
		dev_err(dev->dev,
			"urb completion error %d.\n", urb->status);
		break;
	}

	/* Copy data from URB */
	spin_lock(&dev->vbi_mode.slock);
	dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
	spin_unlock(&dev->vbi_mode.slock);

	/* Resubmit the URB */
	urb->status = usb_submit_urb(urb, GFP_ATOMIC);
	if (urb->status) {
		dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
			urb->status);
	}
}

/*
 * Stop and Deallocate URBs
 */
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
	struct urb *urb;
	int i;

	dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");

	dev->vbi_mode.bulk_ctl.nfields = -1;
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = dev->vbi_mode.bulk_ctl.urb[i];
		if (urb) {
			if (!irqs_disabled())
				usb_kill_urb(urb);
			else
				usb_unlink_urb(urb);

			if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
				kfree(dev->vbi_mode.bulk_ctl.transfer_buffer[i]);
				dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
				    NULL;
			}
			usb_free_urb(urb);
			dev->vbi_mode.bulk_ctl.urb[i] = NULL;
		}
		dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
	}

	kfree(dev->vbi_mode.bulk_ctl.urb);
	kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);

	dev->vbi_mode.bulk_ctl.urb = NULL;
	dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
	dev->vbi_mode.bulk_ctl.num_bufs = 0;

	cx231xx_capture_start(dev, 0, Vbi);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);

/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*bulk_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	dev_dbg(dev->dev, "called cx231xx_vbi_isoc\n");

	/* De-allocates all pending stuff */
	cx231xx_uninit_vbi_isoc(dev);

	/* clear if any halt */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
				  PAL_VBI_LINES : NTSC_VBI_LINES);
	dma_q->lines_completed = 0;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
					     GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.urb) {
		dev_err(dev->dev,
			"cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.transfer_buffer =
	    kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
		dev_err(dev->dev,
			"cannot allocate memory for usb transfer buffers\n");
		kfree(dev->vbi_mode.bulk_ctl.urb);
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	dev->vbi_mode.bulk_ctl.buf = NULL;

	sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}
		dev->vbi_mode.bulk_ctl.urb[i] = urb;
		urb->transfer_flags = 0;

		dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			dev_err(dev->dev,
				"unable to allocate %i bytes for transfer buffer %i%s\n",
				sb_size, i,
				in_interrupt() ? " while in int" : "");
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}

		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* submit urbs and enable IRQ */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			dev_err(dev->dev,
				"submit of urb %i failed (error=%i)\n", i, rc);
			cx231xx_uninit_vbi_isoc(dev);
			return rc;
		}
	}

	cx231xx_capture_start(dev, 1, Vbi);

	return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);
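
/*
 * Typical usage (this mirrors vbi_buffer_prepare() above):
 *
 *	rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
 *				   CX231XX_NUM_VBI_BUFS,
 *				   dev->vbi_mode.alt_max_pkt_size[0],
 *				   cx231xx_isoc_vbi_copy);
 *
 * with cx231xx_uninit_vbi_isoc(dev) to stop streaming and free everything.
 */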

u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

	switch (sav_eav) {
	case SAV_VBI_FIELD1:
		current_field = 1;
		break;
	case SAV_VBI_FIELD2:
		current_field = 2;
		break;
	default:
		break;
	}

	if (current_field < 0)
		return bytes_copied;

	dma_q->last_sav = sav_eav;

	bytes_copied =
	    cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
				  current_field);

	return bytes_copied;
}

/*
 * Announces that a buffer was filled and requests the next one
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* Advise that the buffer was filled */
	/* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.i); */
	buf->vb.state = VIDEOBUF_DONE;
	buf->vb.field_count++;
	v4l2_get_timestamp(&buf->vb.ts);

	dev->vbi_mode.bulk_ctl.buf = NULL;

	list_del(&buf->vb.queue);
	wake_up(&buf->vb.done);
}

u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			  u8 *p_line, u32 length, int field_number)
{
	u32 bytes_to_copy;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width * 2;

	if (dma_q->current_field == -1) {
		/* Just starting up */
		cx231xx_reset_vbi_buffer(dev, dma_q);
	}

	if (dma_q->current_field != field_number)
		dma_q->lines_completed = 0;

	/* get the buffer pointer */
	buf = dev->vbi_mode.bulk_ctl.buf;

	/* Remember the field number for next time */
	dma_q->current_field = field_number;

	bytes_to_copy = dma_q->bytes_left_in_line;
	if (bytes_to_copy > length)
		bytes_to_copy = length;

	if (dma_q->lines_completed >= dma_q->lines_per_field) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return 0;
	}

	dma_q->is_partial_line = 1;

	/* If we don't have a buffer, just return the number of bytes we would
	   have copied if we had a buffer. */
	if (!buf) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return bytes_to_copy;
	}

	/* copy the data to video buffer */
	cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

	dma_q->pos += bytes_to_copy;
	dma_q->bytes_left_in_line -= bytes_to_copy;

	if (dma_q->bytes_left_in_line == 0) {
		dma_q->bytes_left_in_line = _line_size;
		dma_q->lines_completed++;
		dma_q->is_partial_line = 0;

		if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
			vbi_buffer_filled(dev, dma_q, buf);

			dma_q->pos = 0;
			dma_q->lines_completed = 0;
			cx231xx_reset_vbi_buffer(dev, dma_q);
		}
	}

	return bytes_to_copy;
}
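
/*
 * Line accounting example: bytes_left_in_line starts at dev->width * 2 (one
 * full VBI line).  Each call consumes up to that many bytes; when it reaches
 * zero the line counter advances, and once lines_per_field lines of field 2
 * are complete the whole buffer is reported done via vbi_buffer_filled().
 */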

/*
 * video-buf generic routine to get the next available buffer
 */
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
				    struct cx231xx_buffer **buf)
{
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	char *outp;

	if (list_empty(&dma_q->active)) {
		dev_err(dev->dev, "No active queue to serve\n");
		dev->vbi_mode.bulk_ctl.buf = NULL;
		*buf = NULL;
		return;
	}

	/* Get the next buffer */
	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);

	/* Cleans up buffer - Useful for testing for frame/URB loss */
	outp = videobuf_to_vmalloc(&(*buf)->vb);
	memset(outp, 0, (*buf)->vb.size);

	dev->vbi_mode.bulk_ctl.buf = *buf;
}

void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	struct cx231xx_buffer *buf;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL) {
		/* first try to get the buffer */
		get_next_vbi_buf(dma_q, &buf);

		dma_q->pos = 0;
		dma_q->current_field = -1;
	}

	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_completed = 0;
}

int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			u8 *p_buffer, u32 bytes_to_copy)
{
	u8 *p_out_buffer = NULL;
	u32 current_line_bytes_copied = 0;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width << 1;
	void *startwrite;
	int offset, lencopy;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL)
		return -EINVAL;

	p_out_buffer = videobuf_to_vmalloc(&buf->vb);

	if (dma_q->bytes_left_in_line != _line_size) {
		current_line_bytes_copied =
		    _line_size - dma_q->bytes_left_in_line;
	}

	offset = (dma_q->lines_completed * _line_size) +
		 current_line_bytes_copied;

	if (dma_q->current_field == 2) {
		/* Populate the second half of the frame */
		offset += (dev->width * 2 * dma_q->lines_per_field);
	}

	/* prepare destination address */
	startwrite = p_out_buffer + offset;

	lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
		  bytes_to_copy : dma_q->bytes_left_in_line;

	memcpy(startwrite, p_buffer, lencopy);

	return 0;
}
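
/*
 * Offset example: with lines_completed = 3 and half a line already copied,
 * the destination offset is 3 * (width * 2) + (width * 2) / 2 bytes from the
 * start of the buffer; field 2 data is additionally shifted past the first
 * field by width * 2 * lines_per_field bytes.
 */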

u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	if (dma_q->lines_completed == height && dma_q->current_field == 2)
		return 1;
	else
		return 0;
}