/*
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
#include <media/v4l2-event.h>
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);
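
/*
 * DMA_MAGIC_COOKIE is written to the start of the transfer area before a
 * DMA is scheduled (see stream_enc_dma_append()); dma_post() then looks
 * for it to detect where the transferred data actually begins and restores
 * the word that was saved in pending_backup/dma_backup.
 */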
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
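
/*
 * ivtv_stream_map[] translates the stream number the encoder firmware
 * reports in data[0] of the DMA mailbox into the driver's stream type;
 * see ivtv_irq_enc_start_cap() below.
 */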
static void ivtv_pcm_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
	struct ivtv_buffer *buf;

	/* Pass the PCM data to ivtv-alsa */

	while (1) {
		/*
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time.  If the user is doing
		 * this, there may be a buffer in q_io to grab, use, and put
		 * back in rotation.
		 */
		buf = ivtv_dequeue(s, &s->q_io);
		if (buf == NULL)
			buf = ivtv_dequeue(s, &s->q_full);
		if (buf == NULL)
			break;

		if (buf->readpos < buf->bytesused)
			itv->pcm_announce_callback(itv->alsa,
				(u8 *)(buf->buf + buf->readpos),
				(size_t)(buf->bytesused - buf->readpos));

		ivtv_enqueue(s, buf, &s->q_free);
	}
}
static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
void ivtv_irq_work_handler(struct kthread_work *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags))
		ivtv_pcm_work_handler(itv);
}
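
/*
 * Note: ivtv_irq_work_handler() runs in kthread context. The hard IRQ
 * handler below only sets the IVTV_F_I_WORK_HANDLER_* flag bits and
 * queues irq_work, deferring the heavier copying and parsing done by
 * the handlers above out of interrupt context.
 */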
/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}
	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 0) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}
	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}
	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
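
		/*
		 * Worked example (hypothetical numbers): with a buf_size of
		 * 0x8000, a Y plane of 0x14000 bytes rounds up to 0x18000
		 * (three full buffers), so the UV samples then start on a
		 * buffer boundary.
		 */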
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);
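
		/*
		 * For the first buffer of a completed DMA transfer, the magic
		 * cookie written by stream_enc_dma_append() marks where the
		 * data actually starts; if it is not at the last known offset,
		 * the code below scans the first 256 bytes to find it.
		 */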
		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
			{
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->fh == NULL) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}

	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->type == IVTV_ENC_STREAM_TYPE_PCM &&
	    itv->pcm_announce_callback != NULL) {
		/*
		 * Set up the work handler to pass the data to ivtv-alsa.
		 *
		 * We just use q_full and let the work handler race with users
		 * making ivtv-fileops.c calls on the PCM device node.
		 *
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time.  If the user does this,
		 * fragments of data will just go out each interface as they
		 * become available.
		 */
		set_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
	}

	if (s->fh)
		wake_up(&s->waitq);
}
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}
	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
				  buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
				   buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}
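
/*
 * Note: the hardware SG list is fed one element at a time; setting bit 31
 * in the size word appears to mark the element as ready/final for the DMA
 * engine. Each segment is guarded by the 300 ms dma_timer, whose timeout
 * handler is ivtv_unfinished_dma() at the end of this file.
 */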
static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}
/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;
	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}
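
/*
 * Both ivtv_dma_enc_start() above and ivtv_dma_dec_start() below snapshot
 * sg_pending into sg_processing and reset sg_pending_size, so that new
 * segments can be appended while the current transfer is still in flight.
 */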
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);
		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);
		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	u32 status;

	del_timer(&itv->dma_timer);

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	status = read_reg(IVTV_REG_DMASTATUS);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				status, itv->cur_dma_stream);
	/*
	 * We do *not* write back to the IVTV_REG_DMASTATUS register to
	 * clear the error status, if either the encoder write (0x02) or
	 * decoder read (0x01) bus master DMA operation do not indicate
	 * completed.  We can race with the DMA engine, which may have
	 * transitioned to completed status *after* we read the register.
	 * Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the
	 * DMA engine has completed, will cause the DMA engine to stop working.
	 */
	status &= 0x3;
	if (status == 0x3)
		write_reg(status, IVTV_REG_DMASTATUS);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
			/* decoder DMA */
			/*
			 * FIXME - handle cases of DMA error similar to
			 * encoder below, except conditioned on status & 0x1
			 */
			ivtv_dma_dec_start(s);
			return;
		}

		if ((status & 0x2) == 0) {
			/*
			 * CX2341x Bus Master DMA write is ongoing.
			 * Reset the timer and let it complete.
			 */
			itv->dma_timer.expires =
					jiffies + msecs_to_jiffies(600);
			add_timer(&itv->dma_timer);
			return;
		}

		if (itv->dma_retries < 3) {
			/*
			 * CX2341x Bus Master DMA write has ended.
			 * Retry the write, starting with the first
			 * xfer segment. Just retrying the current
			 * segment is not sufficient.
			 */
			s->sg_processed = 0;
			itv->dma_retries++;
			ivtv_dma_enc_start_xfer(s);
			return;
		}
		/* Too many retries, give up on this one */
	}

	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
		itv->dma_data_req_size =
				 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");
	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		if (s && s->vdev)
			v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);
		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}
		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)
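
/*
 * IVTV_IRQ_DMA groups every IRQ source that can make DMA-related work
 * pending; ivtv_irq_handler() uses it below to decide when to scan the
 * streams for pending DMA/PIO transfers to start.
 */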
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);
	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}
	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}
	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}
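
	/*
	 * The (i + irq_rr_idx) % IVTV_MAX_STREAMS rotation above serves the
	 * streams round-robin, so a single stream with continuously pending
	 * DMA cannot starve the others.
	 */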
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}
, &itv
->i_flags
)) {
1063 queue_kthread_work(&itv
->irq_worker
, &itv
->irq_work
);
1066 spin_unlock(&itv
->dma_reg_lock
);
1068 /* If we've just handled a 'forced' vsync, it's safest to say it
1069 * wasn't ours. Another device may have triggered it at just
1072 return vsync_force
? IRQ_NONE
: IRQ_HANDLED
;
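
/*
 * Watchdog for stuck bus master transfers: this is the handler for the
 * dma_timer armed in the *_start_xfer() functions above (300 ms per
 * segment, extended to 600 ms in ivtv_irq_dma_err()). If a transfer never
 * completes, clear the DMA state so the driver can recover.
 */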
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}