2 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "ivtv-driver.h"
22 #include "ivtv-queue.h"
23 #include "ivtv-udma.h"
25 #include "ivtv-mailbox.h"
28 #include <media/v4l2-event.h>
30 #define DMA_MAGIC_COOKIE 0x000001fe
/* Forward declaration: the decoder DMA kick-off is defined further down and
   is needed earlier (e.g. by ivtv_dma_stream_dec_prepare()). */
static void ivtv_dma_dec_start(struct ivtv_stream *s);
34 static const int ivtv_stream_map
[] = {
35 IVTV_ENC_STREAM_TYPE_MPG
,
36 IVTV_ENC_STREAM_TYPE_YUV
,
37 IVTV_ENC_STREAM_TYPE_PCM
,
38 IVTV_ENC_STREAM_TYPE_VBI
,
/*
 * ivtv_pio_work_handler() - process-context PIO transfer handler.
 * Copies data for the current PIO stream (itv->cur_pio_stream) from card
 * memory (dec_mem for the decoder VBI stream, enc_mem otherwise) into the
 * stream's q_dma buffers with memcpy_fromio(), then raises the PIO-complete
 * interrupt by writing IVTV_IRQ_ENC_PIO_COMPLETE to register 0x44.
 *
 * NOTE(review): this file is a damaged extraction — the embedded original
 * line numbers below have gaps (43, 46-47, 54-55, 59, 63-64, 66-67, 69-70)
 * where lines such as the opening brace, the loop counter declaration and
 * early returns were lost.  The surviving text is preserved byte-for-byte;
 * restore the missing lines from the upstream driver before compiling.
 */
42 static void ivtv_pio_work_handler(struct ivtv
*itv
)
44 struct ivtv_stream
*s
= &itv
->streams
[itv
->cur_pio_stream
];
45 struct ivtv_buffer
*buf
;
48 IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
/* Guard: bail out (and still signal completion) if no valid PIO stream. */
49 if (itv
->cur_pio_stream
< 0 || itv
->cur_pio_stream
>= IVTV_MAX_STREAMS
||
50 s
->vdev
== NULL
|| !ivtv_use_pio(s
)) {
51 itv
->cur_pio_stream
= -1;
52 /* trigger PIO complete user interrupt */
53 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE
, 0x44);
56 IVTV_DEBUG_HI_DMA("Process PIO %s\n", s
->name
);
57 list_for_each_entry(buf
, &s
->q_dma
.list
, list
) {
/* Low 18 bits of the SG element hold the transfer size. */
58 u32 size
= s
->sg_processing
[i
].size
& 0x3ffff;
60 /* Copy the data from the card to the buffer */
61 if (s
->type
== IVTV_DEC_STREAM_TYPE_VBI
) {
62 memcpy_fromio(buf
->buf
, itv
->dec_mem
+ s
->sg_processing
[i
].src
- IVTV_DECODER_OFFSET
, size
);
/* NOTE(review): the "} else {" between the two memcpy_fromio() calls
   (original lines 63-64) is among the lines lost by the extraction. */
65 memcpy_fromio(buf
->buf
, itv
->enc_mem
+ s
->sg_processing
[i
].src
, size
);
68 if (i
== s
->sg_processing_size
)
71 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE
, 0x44);
74 void ivtv_irq_work_handler(struct kthread_work
*work
)
76 struct ivtv
*itv
= container_of(work
, struct ivtv
, irq_work
);
78 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO
, &itv
->i_flags
))
79 ivtv_pio_work_handler(itv
);
81 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI
, &itv
->i_flags
))
82 ivtv_vbi_work_handler(itv
);
84 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV
, &itv
->i_flags
))
85 ivtv_yuv_work_handler(itv
);
88 /* Determine the required DMA size, setup enough buffers in the predma queue and
89 actually copy the data from the card to the buffers in case a PIO transfer is
90 required for this stream.
/*
 * NOTE(review): damaged extraction — gaps in the embedded original line
 * numbers (93, 96-97, 101-103, 106-107, 110-112, 114, 116-120, 122-125,
 * 127-128, 131, 136-137, 141, 143-144, 146-147, 151, 153-154, 156-158,
 * 165-166, 169, 171-173, 177, 180-181, 184, 189-190, 194, 196, 201-202,
 * 208, 212, 215, 218-222, 224) mark lost lines: local declarations
 * (offset/size/bytes_needed/rc), switch header, case bodies, breaks,
 * returns and closing braces.  The surviving text is preserved
 * byte-for-byte; restore the missing lines from upstream before compiling.
 * What remains shows the shape: validate the stream, decode offset/size/PTS
 * per stream type from the mailbox data[], write DMA_MAGIC_COOKIE at the
 * transfer start (backing up the overwritten word in pending_backup), round
 * YUV sizes up to buf_size multiples, move buffers into q_predma and fill
 * in the sg_pending[] scatter list.
 */
92 static int stream_enc_dma_append(struct ivtv_stream
*s
, u32 data
[CX2341X_MBOX_MAX_DATA
])
94 struct ivtv
*itv
= s
->itv
;
95 struct ivtv_buffer
*buf
;
98 u32 UVoffset
= 0, UVsize
= 0;
99 int skip_bufs
= s
->q_predma
.buffers
;
100 int idx
= s
->sg_pending_size
;
104 if (s
->vdev
== NULL
) {
105 IVTV_DEBUG_WARN("Stream %s not started\n", s
->name
);
108 if (!test_bit(IVTV_F_S_CLAIMED
, &s
->s_flags
)) {
109 IVTV_DEBUG_WARN("Stream %s not open\n", s
->name
);
113 /* determine offset, size and PTS for the various streams */
115 case IVTV_ENC_STREAM_TYPE_MPG
:
121 case IVTV_ENC_STREAM_TYPE_YUV
:
126 s
->pending_pts
= ((u64
) data
[5] << 32) | data
[6];
129 case IVTV_ENC_STREAM_TYPE_PCM
:
130 offset
= data
[1] + 12;
132 s
->pending_pts
= read_dec(offset
- 8) |
133 ((u64
)(read_dec(offset
- 12)) << 32);
134 if (itv
->has_cx23415
)
135 offset
+= IVTV_DECODER_OFFSET
;
138 case IVTV_ENC_STREAM_TYPE_VBI
:
139 size
= itv
->vbi
.enc_size
* itv
->vbi
.fpi
;
140 offset
= read_enc(itv
->vbi
.enc_start
- 4) + 12;
142 IVTV_DEBUG_INFO("VBI offset == 0\n");
145 s
->pending_pts
= read_enc(offset
- 4) | ((u64
)read_enc(offset
- 8) << 32);
148 case IVTV_DEC_STREAM_TYPE_VBI
:
149 size
= read_dec(itv
->vbi
.dec_start
+ 4) + 8;
150 offset
= read_dec(itv
->vbi
.dec_start
) + itv
->vbi
.dec_start
;
152 offset
+= IVTV_DECODER_OFFSET
;
155 /* shouldn't happen */
159 /* if this is the start of the DMA then fill in the magic cookie */
160 if (s
->sg_pending_size
== 0 && ivtv_use_dma(s
)) {
161 if (itv
->has_cx23415
&& (s
->type
== IVTV_ENC_STREAM_TYPE_PCM
||
162 s
->type
== IVTV_DEC_STREAM_TYPE_VBI
)) {
163 s
->pending_backup
= read_dec(offset
- IVTV_DECODER_OFFSET
);
164 write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE
), offset
- IVTV_DECODER_OFFSET
);
167 s
->pending_backup
= read_enc(offset
);
168 write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE
), offset
);
170 s
->pending_offset
= offset
;
174 if (s
->type
== IVTV_ENC_STREAM_TYPE_YUV
) {
175 /* The size for the Y samples needs to be rounded upwards to a
176 multiple of the buf_size. The UV samples then start in the
178 bytes_needed
= s
->buf_size
* ((bytes_needed
+ s
->buf_size
- 1) / s
->buf_size
);
179 bytes_needed
+= UVsize
;
182 IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
183 ivtv_use_pio(s
) ? "PIO" : "DMA", s
->name
, bytes_needed
, offset
);
185 rc
= ivtv_queue_move(s
, &s
->q_free
, &s
->q_full
, &s
->q_predma
, bytes_needed
);
186 if (rc
< 0) { /* Insufficient buffers */
187 IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
188 bytes_needed
, s
->name
);
191 if (rc
&& !s
->buffers_stolen
&& test_bit(IVTV_F_S_APPL_IO
, &s
->s_flags
)) {
192 IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s
->name
);
193 IVTV_WARN("Cause: the application is not reading fast enough.\n");
195 s
->buffers_stolen
= rc
;
197 /* got the buffers, now fill in sg_pending */
198 buf
= list_entry(s
->q_predma
.list
.next
, struct ivtv_buffer
, list
);
199 memset(buf
->buf
, 0, 128);
200 list_for_each_entry(buf
, &s
->q_predma
.list
, list
) {
203 s
->sg_pending
[idx
].dst
= buf
->dma_handle
;
204 s
->sg_pending
[idx
].src
= offset
;
205 s
->sg_pending
[idx
].size
= s
->buf_size
;
206 buf
->bytesused
= min(size
, s
->buf_size
);
207 buf
->dma_xfer_cnt
= s
->dma_xfer_cnt
;
209 s
->q_predma
.bytesused
+= buf
->bytesused
;
210 size
-= buf
->bytesused
;
211 offset
+= s
->buf_size
;
213 /* Sync SG buffers */
214 ivtv_buf_sync_for_device(s
, buf
);
216 if (size
== 0) { /* YUV */
217 /* process the UV section */
223 s
->sg_pending_size
= idx
;
/*
 * dma_post() - post-processing after a completed encoder DMA/PIO transfer.
 * Walks q_dma looking for the DMA_MAGIC_COOKIE written before the transfer
 * started; re-aligns the buffer if the data did not land at the expected
 * dma_last_offset, restores the backed-up word (s->dma_backup), flags MPG
 * and VBI buffers for later byteswapping, grooms decoder-VBI data via
 * ivtv_process_vbi_data(), and finally moves the buffers to q_free or
 * q_full.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (228, 231-235, 241-242, 244, 248, 251-255, 258, 262, 266-267, 269-270,
 * 273, 275-276, 281-282, 290-291, 293-295, 297-298) mark lost lines:
 * declarations of p/offset/u32buf/x, the enclosing for/if structure,
 * break/continue lines and closing braces.  Surviving text preserved
 * byte-for-byte; restore from upstream before compiling.
 */
227 static void dma_post(struct ivtv_stream
*s
)
229 struct ivtv
*itv
= s
->itv
;
230 struct ivtv_buffer
*buf
= NULL
;
236 IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s
) ? "PIO" : "DMA",
237 s
->name
, s
->dma_offset
);
238 list_for_each(p
, &s
->q_dma
.list
) {
239 buf
= list_entry(p
, struct ivtv_buffer
, list
);
240 u32buf
= (__le32
*)buf
->buf
;
243 ivtv_buf_sync_for_cpu(s
, buf
);
/* Only the first buffer (x == 0) of a DMA transfer carries the cookie. */
245 if (x
== 0 && ivtv_use_dma(s
)) {
246 offset
= s
->dma_last_offset
;
247 if (u32buf
[offset
/ 4] != DMA_MAGIC_COOKIE
)
/* Cookie not at the expected word: scan the first 64 words for it. */
249 for (offset
= 0; offset
< 64; offset
++) {
250 if (u32buf
[offset
] == DMA_MAGIC_COOKIE
) {
256 IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s
->name
);
257 offset
= s
->dma_last_offset
;
259 if (s
->dma_last_offset
!= offset
)
260 IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s
->name
, s
->dma_last_offset
, offset
);
261 s
->dma_last_offset
= offset
;
263 if (itv
->has_cx23415
&& (s
->type
== IVTV_ENC_STREAM_TYPE_PCM
||
264 s
->type
== IVTV_DEC_STREAM_TYPE_VBI
)) {
265 write_dec_sync(0, s
->dma_offset
- IVTV_DECODER_OFFSET
);
268 write_enc_sync(0, s
->dma_offset
);
271 buf
->bytesused
-= offset
;
272 memcpy(buf
->buf
, buf
->buf
+ offset
, buf
->bytesused
+ offset
);
/* Restore the word the magic cookie overwrote before the transfer. */
274 *u32buf
= cpu_to_le32(s
->dma_backup
);
277 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
278 if (s
->type
== IVTV_ENC_STREAM_TYPE_MPG
||
279 s
->type
== IVTV_ENC_STREAM_TYPE_VBI
)
280 buf
->b_flags
|= IVTV_F_B_NEED_BUF_SWAP
;
283 buf
->bytesused
+= s
->dma_last_offset
;
284 if (buf
&& s
->type
== IVTV_DEC_STREAM_TYPE_VBI
) {
285 list_for_each_entry(buf
, &s
->q_dma
.list
, list
) {
286 /* Parse and Groom VBI Data */
287 s
->q_dma
.bytesused
-= buf
->bytesused
;
288 ivtv_process_vbi_data(itv
, buf
, 0, s
->type
);
289 s
->q_dma
.bytesused
+= buf
->bytesused
;
292 ivtv_queue_move(s
, &s
->q_dma
, NULL
, &s
->q_free
, 0);
296 ivtv_queue_move(s
, &s
->q_dma
, NULL
, &s
->q_full
, s
->q_dma
.bytesused
);
/*
 * ivtv_dma_stream_dec_prepare() - build the scatter list for a host->decoder
 * DMA at the given card-memory offset and kick it off.  For YUV streams the
 * UV plane is redirected to uv_offset once y_size bytes of Y data have been
 * written, and a blanking buffer block is prepended when needed.  Starts the
 * decoder DMA immediately if no DMA is in flight, otherwise marks the stream
 * DMA-pending.  @lock is visible in the signature but its use (conditional
 * spin_lock_irqsave) falls in the lost lines — verify against upstream.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (302, 310, 313-314, 316, 323-327, 335, 337, 344-346, 351, 353, 356-357,
 * 359, 362, 366-367, 369-370, 372) mark lost lines: the idx/y_done locals,
 * else branches, idx increments and closing braces.  Surviving text is
 * preserved byte-for-byte; restore from upstream before compiling.
 */
301 void ivtv_dma_stream_dec_prepare(struct ivtv_stream
*s
, u32 offset
, int lock
)
303 struct ivtv
*itv
= s
->itv
;
304 struct yuv_playback_info
*yi
= &itv
->yuv_info
;
305 u8 frame
= yi
->draw_frame
;
306 struct yuv_frame_info
*f
= &yi
->new_frame_info
[frame
];
307 struct ivtv_buffer
*buf
;
308 u32 y_size
= 720 * ((f
->src_h
+ 31) & ~31);
309 u32 uv_offset
= offset
+ IVTV_YUV_BUFFER_UV_OFFSET
;
311 int bytes_written
= 0;
312 unsigned long flags
= 0;
315 IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s
->name
, s
->q_predma
.bytesused
, offset
);
317 /* Insert buffer block for YUV if needed */
318 if (s
->type
== IVTV_DEC_STREAM_TYPE_YUV
&& f
->offset_y
) {
319 if (yi
->blanking_dmaptr
) {
320 s
->sg_pending
[idx
].src
= yi
->blanking_dmaptr
;
321 s
->sg_pending
[idx
].dst
= offset
;
322 s
->sg_pending
[idx
].size
= 720 * 16;
328 list_for_each_entry(buf
, &s
->q_predma
.list
, list
) {
329 /* YUV UV Offset from Y Buffer */
330 if (s
->type
== IVTV_DEC_STREAM_TYPE_YUV
&& !y_done
&&
331 (bytes_written
+ buf
->bytesused
) >= y_size
) {
332 s
->sg_pending
[idx
].src
= buf
->dma_handle
;
333 s
->sg_pending
[idx
].dst
= offset
;
334 s
->sg_pending
[idx
].size
= y_size
- bytes_written
;
/* Buffer straddles the Y/UV boundary: emit a second SG element for
   the remainder of this buffer at the (lost) uv_offset destination. */
336 if (s
->sg_pending
[idx
].size
!= buf
->bytesused
) {
338 s
->sg_pending
[idx
].src
=
339 buf
->dma_handle
+ s
->sg_pending
[idx
- 1].size
;
340 s
->sg_pending
[idx
].dst
= offset
;
341 s
->sg_pending
[idx
].size
=
342 buf
->bytesused
- s
->sg_pending
[idx
- 1].size
;
343 offset
+= s
->sg_pending
[idx
].size
;
347 s
->sg_pending
[idx
].src
= buf
->dma_handle
;
348 s
->sg_pending
[idx
].dst
= offset
;
349 s
->sg_pending
[idx
].size
= buf
->bytesused
;
350 offset
+= buf
->bytesused
;
352 bytes_written
+= buf
->bytesused
;
354 /* Sync SG buffers */
355 ivtv_buf_sync_for_device(s
, buf
);
358 s
->sg_pending_size
= idx
;
360 /* Sync Hardware SG List of buffers */
361 ivtv_stream_sync_for_device(s
);
363 spin_lock_irqsave(&itv
->dma_reg_lock
, flags
);
364 if (!test_bit(IVTV_F_I_DMA
, &itv
->i_flags
)) {
365 ivtv_dma_dec_start(s
);
368 set_bit(IVTV_F_S_DMA_PENDING
, &s
->s_flags
);
371 spin_unlock_irqrestore(&itv
->dma_reg_lock
, flags
);
374 static void ivtv_dma_enc_start_xfer(struct ivtv_stream
*s
)
376 struct ivtv
*itv
= s
->itv
;
378 s
->sg_dma
->src
= cpu_to_le32(s
->sg_processing
[s
->sg_processed
].src
);
379 s
->sg_dma
->dst
= cpu_to_le32(s
->sg_processing
[s
->sg_processed
].dst
);
380 s
->sg_dma
->size
= cpu_to_le32(s
->sg_processing
[s
->sg_processed
].size
| 0x80000000);
382 /* Sync Hardware SG List of buffers */
383 ivtv_stream_sync_for_device(s
);
384 write_reg(s
->sg_handle
, IVTV_REG_ENCDMAADDR
);
385 write_reg_sync(read_reg(IVTV_REG_DMAXFER
) | 0x02, IVTV_REG_DMAXFER
);
386 itv
->dma_timer
.expires
= jiffies
+ msecs_to_jiffies(300);
387 add_timer(&itv
->dma_timer
);
390 static void ivtv_dma_dec_start_xfer(struct ivtv_stream
*s
)
392 struct ivtv
*itv
= s
->itv
;
394 s
->sg_dma
->src
= cpu_to_le32(s
->sg_processing
[s
->sg_processed
].src
);
395 s
->sg_dma
->dst
= cpu_to_le32(s
->sg_processing
[s
->sg_processed
].dst
);
396 s
->sg_dma
->size
= cpu_to_le32(s
->sg_processing
[s
->sg_processed
].size
| 0x80000000);
398 /* Sync Hardware SG List of buffers */
399 ivtv_stream_sync_for_device(s
);
400 write_reg(s
->sg_handle
, IVTV_REG_DECDMAADDR
);
401 write_reg_sync(read_reg(IVTV_REG_DMAXFER
) | 0x01, IVTV_REG_DMAXFER
);
402 itv
->dma_timer
.expires
= jiffies
+ msecs_to_jiffies(300);
403 add_timer(&itv
->dma_timer
);
406 /* start the encoder DMA */
/*
 * ivtv_dma_enc_start() - commit the pending scatter list of an encoder
 * stream and start the transfer (DMA) or schedule PIO work.  If this is the
 * MPEG stream and VBI segments are pending, the VBI scatter list is folded
 * into this transfer (IVTV_F_S_DMA_HAS_VBI) to keep the firmware from
 * mistaking a VBI-DMA end for an MPEG-DMA end.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (408, 411-412, 414, 417-418, 420, 423, 437, 443-445, 449, 453, 459-460,
 * 465) mark lost lines: the opening brace, the loop variable declaration,
 * the ivtv_use_dma() guard before the +256 padding, the return after the
 * PIO branch and closing braces.  Surviving text preserved byte-for-byte;
 * restore from upstream before compiling.
 */
407 static void ivtv_dma_enc_start(struct ivtv_stream
*s
)
409 struct ivtv
*itv
= s
->itv
;
410 struct ivtv_stream
*s_vbi
= &itv
->streams
[IVTV_ENC_STREAM_TYPE_VBI
];
413 IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s
) ? "DMA" : "PIO", s
->name
);
415 if (s
->q_predma
.bytesused
)
416 ivtv_queue_move(s
, &s
->q_predma
, NULL
, &s
->q_dma
, s
->q_predma
.bytesused
);
/* Pad the last SG element; the guard (presumably ivtv_use_dma(s), lines
   417-418) was lost in extraction. */
419 s
->sg_pending
[s
->sg_pending_size
- 1].size
+= 256;
421 /* If this is an MPEG stream, and VBI data is also pending, then append the
422 VBI DMA to the MPEG DMA and transfer both sets of data at once.
424 VBI DMA is a second class citizen compared to MPEG and mixing them together
425 will confuse the firmware (the end of a VBI DMA is seen as the end of a
426 MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
427 sure we only use the MPEG DMA to transfer the VBI DMA if both are in
428 use. This way no conflicts occur. */
429 clear_bit(IVTV_F_S_DMA_HAS_VBI
, &s
->s_flags
);
430 if (s
->type
== IVTV_ENC_STREAM_TYPE_MPG
&& s_vbi
->sg_pending_size
&&
431 s
->sg_pending_size
+ s_vbi
->sg_pending_size
<= s
->buffers
) {
432 ivtv_queue_move(s_vbi
, &s_vbi
->q_predma
, NULL
, &s_vbi
->q_dma
, s_vbi
->q_predma
.bytesused
);
433 if (ivtv_use_dma(s_vbi
))
434 s_vbi
->sg_pending
[s_vbi
->sg_pending_size
- 1].size
+= 256;
435 for (i
= 0; i
< s_vbi
->sg_pending_size
; i
++) {
436 s
->sg_pending
[s
->sg_pending_size
++] = s_vbi
->sg_pending
[i
];
438 s_vbi
->dma_offset
= s_vbi
->pending_offset
;
439 s_vbi
->sg_pending_size
= 0;
440 s_vbi
->dma_xfer_cnt
++;
441 set_bit(IVTV_F_S_DMA_HAS_VBI
, &s
->s_flags
);
442 IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi
->name
);
/* Snapshot the pending list into the processing list and reset pending. */
446 memcpy(s
->sg_processing
, s
->sg_pending
, sizeof(struct ivtv_sg_host_element
) * s
->sg_pending_size
);
447 s
->sg_processing_size
= s
->sg_pending_size
;
448 s
->sg_pending_size
= 0;
450 s
->dma_offset
= s
->pending_offset
;
451 s
->dma_backup
= s
->pending_backup
;
452 s
->dma_pts
= s
->pending_pts
;
454 if (ivtv_use_pio(s
)) {
455 set_bit(IVTV_F_I_WORK_HANDLER_PIO
, &itv
->i_flags
);
456 set_bit(IVTV_F_I_HAVE_WORK
, &itv
->i_flags
);
457 set_bit(IVTV_F_I_PIO
, &itv
->i_flags
);
458 itv
->cur_pio_stream
= s
->type
;
461 itv
->dma_retries
= 0;
462 ivtv_dma_enc_start_xfer(s
);
463 set_bit(IVTV_F_I_DMA
, &itv
->i_flags
);
464 itv
->cur_dma_stream
= s
->type
;
/*
 * ivtv_dma_dec_start() - commit the pending decoder scatter list and start
 * the decoder DMA: move q_predma to q_dma, snapshot sg_pending into
 * sg_processing, reset the retry counter, kick the first segment and mark
 * the stream as the current DMA owner.
 *
 * NOTE(review): damaged extraction — original lines 469, 471, 474, 478-479
 * and 485 were lost (opening brace, blanks and interior statements,
 * presumably the dma_xfer_cnt increment and sg_processed reset — verify
 * against upstream).  Surviving text preserved byte-for-byte.
 */
468 static void ivtv_dma_dec_start(struct ivtv_stream
*s
)
470 struct ivtv
*itv
= s
->itv
;
472 if (s
->q_predma
.bytesused
)
473 ivtv_queue_move(s
, &s
->q_predma
, NULL
, &s
->q_dma
, s
->q_predma
.bytesused
);
475 memcpy(s
->sg_processing
, s
->sg_pending
, sizeof(struct ivtv_sg_host_element
) * s
->sg_pending_size
);
476 s
->sg_processing_size
= s
->sg_pending_size
;
477 s
->sg_pending_size
= 0;
480 IVTV_DEBUG_HI_DMA("start DMA for %s\n", s
->name
);
481 itv
->dma_retries
= 0;
482 ivtv_dma_dec_start_xfer(s
);
483 set_bit(IVTV_F_I_DMA
, &itv
->i_flags
);
484 itv
->cur_dma_stream
= s
->type
;
/*
 * ivtv_irq_dma_read() - interrupt handler for completed host->decoder DMA.
 * Stops the watchdog timer, checks IVTV_REG_DMASTATUS for errors (retrying
 * up to 3 times from the first segment), resubmits the next segment if any
 * remain, otherwise tells the firmware the transfer is done
 * (CX2341X_DEC_SCHED_DMA_FROM_HOST), recycles q_dma buffers to q_free and
 * clears the DMA-in-flight state.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (488, 492, 494, 496, 498-499, 503, 513-514, 517-520, 524-525, 527, 529,
 * 535, 537-538, 543-545) mark lost lines: the early return, retry
 * restart, else branch setting hw_stream_type, the closing arguments of
 * the ivtv_vapi() call and closing braces.  Surviving text preserved
 * byte-for-byte; restore from upstream before compiling.
 */
487 static void ivtv_irq_dma_read(struct ivtv
*itv
)
489 struct ivtv_stream
*s
= NULL
;
490 struct ivtv_buffer
*buf
;
491 int hw_stream_type
= 0;
493 IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
495 del_timer(&itv
->dma_timer
);
497 if (!test_bit(IVTV_F_I_UDMA
, &itv
->i_flags
) && itv
->cur_dma_stream
< 0)
500 if (!test_bit(IVTV_F_I_UDMA
, &itv
->i_flags
)) {
501 s
= &itv
->streams
[itv
->cur_dma_stream
];
502 ivtv_stream_sync_for_cpu(s
);
504 if (read_reg(IVTV_REG_DMASTATUS
) & 0x14) {
505 IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
506 read_reg(IVTV_REG_DMASTATUS
),
507 s
->sg_processed
, s
->sg_processing_size
, itv
->dma_retries
);
508 write_reg(read_reg(IVTV_REG_DMASTATUS
) & 3, IVTV_REG_DMASTATUS
);
509 if (itv
->dma_retries
== 3) {
510 /* Too many retries, give up on this frame */
511 itv
->dma_retries
= 0;
512 s
->sg_processed
= s
->sg_processing_size
;
515 /* Retry, starting with the first xfer segment.
516 Just retrying the current segment is not sufficient. */
521 if (s
->sg_processed
< s
->sg_processing_size
) {
522 /* DMA next buffer */
523 ivtv_dma_dec_start_xfer(s
);
526 if (s
->type
== IVTV_DEC_STREAM_TYPE_YUV
)
528 IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s
->name
, s
->q_dma
.bytesused
);
530 /* For some reason must kick the firmware, like PIO mode,
531 I think this tells the firmware we are done and the size
532 of the xfer so it can calculate what we need next.
533 I think we can do this part ourselves but would have to
534 fully calculate xfer info ourselves and not use interrupts
536 ivtv_vapi(itv
, CX2341X_DEC_SCHED_DMA_FROM_HOST
, 3, 0, s
->q_dma
.bytesused
,
539 /* Free last DMA call */
540 while ((buf
= ivtv_dequeue(s
, &s
->q_dma
)) != NULL
) {
541 ivtv_buf_sync_for_cpu(s
, buf
);
542 ivtv_enqueue(s
, buf
, &s
->q_free
);
546 clear_bit(IVTV_F_I_UDMA
, &itv
->i_flags
);
547 clear_bit(IVTV_F_I_DMA
, &itv
->i_flags
);
548 itv
->cur_dma_stream
= -1;
549 wake_up(&itv
->dma_waitq
);
/*
 * ivtv_irq_enc_dma_complete() - interrupt handler for a finished encoder
 * DMA segment.  Reads the DMA-end mailbox, stops the watchdog, handles
 * errors (data[0] & 0x18) with up-to-3 retries from the first segment,
 * starts the next segment if one remains, otherwise clears the DMA state,
 * and if the transfer carried piggy-backed VBI data switches to the VBI
 * stream before finishing.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (553, 556, 559, 561, 563-564, 567, 576-577, 580-583, 587-588, 591,
 * 594-595, 597, 599) mark lost lines: the early return, retry restart,
 * dma_post() calls and closing braces.  Surviving text preserved
 * byte-for-byte; restore from upstream before compiling.
 */
552 static void ivtv_irq_enc_dma_complete(struct ivtv
*itv
)
554 u32 data
[CX2341X_MBOX_MAX_DATA
];
555 struct ivtv_stream
*s
;
557 ivtv_api_get_data(&itv
->enc_mbox
, IVTV_MBOX_DMA_END
, 2, data
);
558 IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data
[0], data
[1], itv
->cur_dma_stream
);
560 del_timer(&itv
->dma_timer
);
562 if (itv
->cur_dma_stream
< 0)
565 s
= &itv
->streams
[itv
->cur_dma_stream
];
566 ivtv_stream_sync_for_cpu(s
);
568 if (data
[0] & 0x18) {
569 IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data
[0],
570 s
->dma_offset
, s
->sg_processed
, s
->sg_processing_size
, itv
->dma_retries
);
571 write_reg(read_reg(IVTV_REG_DMASTATUS
) & 3, IVTV_REG_DMASTATUS
);
572 if (itv
->dma_retries
== 3) {
573 /* Too many retries, give up on this frame */
574 itv
->dma_retries
= 0;
575 s
->sg_processed
= s
->sg_processing_size
;
578 /* Retry, starting with the first xfer segment.
579 Just retrying the current segment is not sufficient. */
584 if (s
->sg_processed
< s
->sg_processing_size
) {
585 /* DMA next buffer */
586 ivtv_dma_enc_start_xfer(s
);
589 clear_bit(IVTV_F_I_DMA
, &itv
->i_flags
);
590 itv
->cur_dma_stream
= -1;
592 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI
, &s
->s_flags
)) {
593 s
= &itv
->streams
[IVTV_ENC_STREAM_TYPE_VBI
];
596 s
->sg_processing_size
= 0;
598 wake_up(&itv
->dma_waitq
);
/*
 * ivtv_irq_enc_pio_complete() - interrupt handler for a finished PIO
 * transfer.  Validates cur_pio_stream, clears the PIO flag, asks the
 * firmware to schedule the next transfer for the stream type
 * (CX2341X_ENC_SCHED_DMA_TO_HOST with type argument 0/1/2 for MPG/YUV/PCM),
 * handles piggy-backed VBI data, and wakes DMA waiters.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (602, 604, 607-608, 613, 623-624, 626) mark lost lines: the early
 * return inside the guard, the dma_post() calls and closing braces.
 * Surviving text preserved byte-for-byte; restore from upstream before
 * compiling.
 */
601 static void ivtv_irq_enc_pio_complete(struct ivtv
*itv
)
603 struct ivtv_stream
*s
;
605 if (itv
->cur_pio_stream
< 0 || itv
->cur_pio_stream
>= IVTV_MAX_STREAMS
) {
606 itv
->cur_pio_stream
= -1;
609 s
= &itv
->streams
[itv
->cur_pio_stream
];
610 IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s
->name
);
611 clear_bit(IVTV_F_I_PIO
, &itv
->i_flags
);
612 itv
->cur_pio_stream
= -1;
614 if (s
->type
== IVTV_ENC_STREAM_TYPE_MPG
)
615 ivtv_vapi(itv
, CX2341X_ENC_SCHED_DMA_TO_HOST
, 3, 0, 0, 0);
616 else if (s
->type
== IVTV_ENC_STREAM_TYPE_YUV
)
617 ivtv_vapi(itv
, CX2341X_ENC_SCHED_DMA_TO_HOST
, 3, 0, 0, 1);
618 else if (s
->type
== IVTV_ENC_STREAM_TYPE_PCM
)
619 ivtv_vapi(itv
, CX2341X_ENC_SCHED_DMA_TO_HOST
, 3, 0, 0, 2);
620 clear_bit(IVTV_F_I_PIO
, &itv
->i_flags
);
621 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI
, &s
->s_flags
)) {
622 s
= &itv
->streams
[IVTV_ENC_STREAM_TYPE_VBI
];
625 wake_up(&itv
->dma_waitq
);
/*
 * ivtv_irq_dma_err() - interrupt handler for a DMA error.  Logs the mailbox
 * and status-register state, acknowledges the error bits, and restarts the
 * failed transfer: decoder streams via ivtv_dma_dec_start(), encoder
 * streams via ivtv_dma_enc_start(), or a pending user-DMA via
 * ivtv_udma_start().  Otherwise it clears the DMA state and wakes waiters.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (629, 631, 640-641, 644, 646-647, 650-651, 656) mark lost lines: the
 * opening brace, else lines, returns after the restart calls and closing
 * braces.  Surviving text preserved byte-for-byte; restore from upstream
 * before compiling.
 */
628 static void ivtv_irq_dma_err(struct ivtv
*itv
)
630 u32 data
[CX2341X_MBOX_MAX_DATA
];
632 del_timer(&itv
->dma_timer
);
633 ivtv_api_get_data(&itv
->enc_mbox
, IVTV_MBOX_DMA_END
, 2, data
);
634 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data
[0], data
[1],
635 read_reg(IVTV_REG_DMASTATUS
), itv
->cur_dma_stream
);
636 write_reg(read_reg(IVTV_REG_DMASTATUS
) & 3, IVTV_REG_DMASTATUS
);
637 if (!test_bit(IVTV_F_I_UDMA
, &itv
->i_flags
) &&
638 itv
->cur_dma_stream
>= 0 && itv
->cur_dma_stream
< IVTV_MAX_STREAMS
) {
639 struct ivtv_stream
*s
= &itv
->streams
[itv
->cur_dma_stream
];
642 if (s
->type
>= IVTV_DEC_STREAM_TYPE_MPG
)
643 ivtv_dma_dec_start(s
);
645 ivtv_dma_enc_start(s
);
648 if (test_bit(IVTV_F_I_UDMA
, &itv
->i_flags
)) {
649 ivtv_udma_start(itv
);
652 clear_bit(IVTV_F_I_UDMA
, &itv
->i_flags
);
653 clear_bit(IVTV_F_I_DMA
, &itv
->i_flags
);
654 itv
->cur_dma_stream
= -1;
655 wake_up(&itv
->dma_waitq
);
/*
 * ivtv_irq_enc_start_cap() - interrupt handler telling us the encoder has
 * captured data.  Fetches the DMA arguments from the mailbox, validates
 * them (data[0] selects the stream via ivtv_stream_map[]; data[1]/data[2]
 * are offset/size and must be non-zero), then queues the transfer with
 * stream_enc_dma_append(), marking the stream PIO- or DMA-pending on
 * success.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (659, 662, 666, 670-671, 675-676) mark lost lines: the opening brace,
 * the return after the invalid-input warning and closing braces.
 * Surviving text preserved byte-for-byte; restore from upstream before
 * compiling.
 */
658 static void ivtv_irq_enc_start_cap(struct ivtv
*itv
)
660 u32 data
[CX2341X_MBOX_MAX_DATA
];
661 struct ivtv_stream
*s
;
663 /* Get DMA destination and size arguments from card */
664 ivtv_api_get_data(&itv
->enc_mbox
, IVTV_MBOX_DMA
, 7, data
);
665 IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data
[0], data
[1], data
[2]);
667 if (data
[0] > 2 || data
[1] == 0 || data
[2] == 0) {
668 IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
669 data
[0], data
[1], data
[2]);
672 s
= &itv
->streams
[ivtv_stream_map
[data
[0]]];
673 if (!stream_enc_dma_append(s
, data
)) {
674 set_bit(ivtv_use_pio(s
) ? IVTV_F_S_PIO_PENDING
: IVTV_F_S_DMA_PENDING
, &s
->s_flags
);
678 static void ivtv_irq_enc_vbi_cap(struct ivtv
*itv
)
680 u32 data
[CX2341X_MBOX_MAX_DATA
];
681 struct ivtv_stream
*s
;
683 IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
684 s
= &itv
->streams
[IVTV_ENC_STREAM_TYPE_VBI
];
686 if (!stream_enc_dma_append(s
, data
))
687 set_bit(ivtv_use_pio(s
) ? IVTV_F_S_PIO_PENDING
: IVTV_F_S_DMA_PENDING
, &s
->s_flags
);
690 static void ivtv_irq_dec_vbi_reinsert(struct ivtv
*itv
)
692 u32 data
[CX2341X_MBOX_MAX_DATA
];
693 struct ivtv_stream
*s
= &itv
->streams
[IVTV_DEC_STREAM_TYPE_VBI
];
695 IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
696 if (test_bit(IVTV_F_S_CLAIMED
, &s
->s_flags
) &&
697 !stream_enc_dma_append(s
, data
)) {
698 set_bit(IVTV_F_S_PIO_PENDING
, &s
->s_flags
);
/*
 * ivtv_irq_dec_data_req() - interrupt handler for the decoder requesting
 * more data from the host.  For YUV playback it computes the frame size
 * from v4l2_src_h (1080 lines * width rounded to 32) and completes the
 * previous frame; for MPEG it takes the size from the mailbox (capped at
 * 0x10000).  If enough data is queued it moves q_full into q_predma and
 * starts the host->decoder DMA via ivtv_dma_stream_dec_prepare(); otherwise
 * it sets IVTV_F_S_NEEDS_DATA.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (703, 706-708, 717-718, 723, 728-729, 735-736) mark lost lines: the
 * opening brace, the "} else {" between the YUV and MPEG paths, the
 * return inside the needs-data branch and closing braces.  Surviving text
 * preserved byte-for-byte; restore from upstream before compiling.
 */
702 static void ivtv_irq_dec_data_req(struct ivtv
*itv
)
704 u32 data
[CX2341X_MBOX_MAX_DATA
];
705 struct ivtv_stream
*s
;
709 if (test_bit(IVTV_F_I_DEC_YUV
, &itv
->i_flags
)) {
710 ivtv_api_get_data(&itv
->dec_mbox
, IVTV_MBOX_DMA
, 2, data
);
711 itv
->dma_data_req_size
=
712 1080 * ((itv
->yuv_info
.v4l2_src_h
+ 31) & ~31);
713 itv
->dma_data_req_offset
= data
[1];
714 if (atomic_read(&itv
->yuv_info
.next_dma_frame
) >= 0)
715 ivtv_yuv_frame_complete(itv
);
716 s
= &itv
->streams
[IVTV_DEC_STREAM_TYPE_YUV
];
719 ivtv_api_get_data(&itv
->dec_mbox
, IVTV_MBOX_DMA
, 3, data
);
720 itv
->dma_data_req_size
= min_t(u32
, data
[2], 0x10000);
721 itv
->dma_data_req_offset
= data
[1];
722 s
= &itv
->streams
[IVTV_DEC_STREAM_TYPE_MPG
];
724 IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s
->name
, s
->q_full
.bytesused
,
725 itv
->dma_data_req_offset
, itv
->dma_data_req_size
);
726 if (itv
->dma_data_req_size
== 0 || s
->q_full
.bytesused
< itv
->dma_data_req_size
) {
727 set_bit(IVTV_F_S_NEEDS_DATA
, &s
->s_flags
);
730 if (test_bit(IVTV_F_I_DEC_YUV
, &itv
->i_flags
))
731 ivtv_yuv_setup_stream_frame(itv
);
732 clear_bit(IVTV_F_S_NEEDS_DATA
, &s
->s_flags
);
733 ivtv_queue_move(s
, &s
->q_full
, NULL
, &s
->q_predma
, itv
->dma_data_req_size
);
734 ivtv_dma_stream_dec_prepare(s
, itv
->dma_data_req_offset
+ IVTV_DECODER_OFFSET
, 0);
/*
 * ivtv_irq_vsync() - decoder vertical-sync handler.  Determines the current
 * field from IVTV_REG_DEC_LINE_FIELD, flips the display registers
 * (0x82c/0x830/0x834/0x838) to the next YUV frame when one is ready, queues
 * V4L2_EVENT_VSYNC events (top/bottom field) on the output stream, and
 * schedules the VBI and YUV work handlers as needed.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (739, 746, 751, 753, 758, 768-771, 776, 780, 782, 784, 787-788, 790,
 * 794-797, 800, 808-809, 812-813, 817-819, 821, 825-830) mark lost lines:
 * the opening brace, closing braces of nested blocks, the initializer
 * closers ("};") of evtop/evbottom, the last_dma_frame recomputation
 * header and the fields_lapsed update.  Surviving text preserved
 * byte-for-byte; restore from upstream before compiling.
 */
738 static void ivtv_irq_vsync(struct ivtv
*itv
)
740 /* The vsync interrupt is unusual in that it won't clear until
741 * the end of the first line for the current field, at which
742 * point it clears itself. This can result in repeated vsync
743 * interrupts, or a missed vsync. Read some of the registers
744 * to determine the line being displayed and ensure we handle
745 * one vsync per frame.
747 unsigned int frame
= read_reg(IVTV_REG_DEC_LINE_FIELD
) & 1;
748 struct yuv_playback_info
*yi
= &itv
->yuv_info
;
749 int last_dma_frame
= atomic_read(&yi
->next_dma_frame
);
750 struct yuv_frame_info
*f
= &yi
->new_frame_info
[last_dma_frame
];
752 if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");
754 if (((frame
^ f
->sync_field
) == 0 &&
755 ((itv
->last_vsync_field
& 1) ^ f
->sync_field
)) ||
756 (frame
!= (itv
->last_vsync_field
& 1) && !f
->interlaced
)) {
757 int next_dma_frame
= last_dma_frame
;
759 if (!(f
->interlaced
&& f
->delay
&& yi
->fields_lapsed
< 1)) {
760 if (next_dma_frame
>= 0 && next_dma_frame
!= atomic_read(&yi
->next_fill_frame
)) {
761 write_reg(yuv_offset
[next_dma_frame
] >> 4, 0x82c);
762 write_reg((yuv_offset
[next_dma_frame
] + IVTV_YUV_BUFFER_UV_OFFSET
) >> 4, 0x830);
763 write_reg(yuv_offset
[next_dma_frame
] >> 4, 0x834);
764 write_reg((yuv_offset
[next_dma_frame
] + IVTV_YUV_BUFFER_UV_OFFSET
) >> 4, 0x838);
765 next_dma_frame
= (next_dma_frame
+ 1) % IVTV_YUV_BUFFERS
;
766 atomic_set(&yi
->next_dma_frame
, next_dma_frame
);
767 yi
->fields_lapsed
= -1;
772 if (frame
!= (itv
->last_vsync_field
& 1)) {
773 static const struct v4l2_event evtop
= {
774 .type
= V4L2_EVENT_VSYNC
,
775 .u
.vsync
.field
= V4L2_FIELD_TOP
,
777 static const struct v4l2_event evbottom
= {
778 .type
= V4L2_EVENT_VSYNC
,
779 .u
.vsync
.field
= V4L2_FIELD_BOTTOM
,
781 struct ivtv_stream
*s
= ivtv_get_output_stream(itv
);
783 itv
->last_vsync_field
+= 1;
785 clear_bit(IVTV_F_I_VALID_DEC_TIMINGS
, &itv
->i_flags
);
786 clear_bit(IVTV_F_I_EV_VSYNC_FIELD
, &itv
->i_flags
);
789 set_bit(IVTV_F_I_EV_VSYNC_FIELD
, &itv
->i_flags
);
791 if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED
, &itv
->i_flags
)) {
792 set_bit(IVTV_F_I_EV_VSYNC
, &itv
->i_flags
);
793 wake_up(&itv
->event_waitq
);
798 v4l2_event_queue(s
->vdev
, frame
? &evtop
: &evbottom
);
799 wake_up(&itv
->vsync_waitq
);
801 /* Send VBI to saa7127 */
802 if (frame
&& (itv
->output_mode
== OUT_PASSTHROUGH
||
803 test_bit(IVTV_F_I_UPDATE_WSS
, &itv
->i_flags
) ||
804 test_bit(IVTV_F_I_UPDATE_VPS
, &itv
->i_flags
) ||
805 test_bit(IVTV_F_I_UPDATE_CC
, &itv
->i_flags
))) {
806 set_bit(IVTV_F_I_WORK_HANDLER_VBI
, &itv
->i_flags
);
807 set_bit(IVTV_F_I_HAVE_WORK
, &itv
->i_flags
);
810 /* Check if we need to update the yuv registers */
811 if (yi
->running
&& (yi
->yuv_forced_update
|| f
->update
)) {
814 (u8
)(atomic_read(&yi
->next_dma_frame
) -
815 1) % IVTV_YUV_BUFFERS
;
816 f
= &yi
->new_frame_info
[last_dma_frame
];
820 yi
->update_frame
= last_dma_frame
;
822 yi
->yuv_forced_update
= 0;
823 set_bit(IVTV_F_I_WORK_HANDLER_YUV
, &itv
->i_flags
);
824 set_bit(IVTV_F_I_HAVE_WORK
, &itv
->i_flags
);
832 #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)
/*
 * ivtv_irq_handler() - top-level hard-IRQ handler.  Reads and acknowledges
 * IVTV_REG_IRQSTATUS under dma_reg_lock, detects missed vsyncs, dispatches
 * each pending interrupt bit to its handler, round-robins pending DMA/PIO
 * streams (irq_rr_idx) when no transfer is in flight, queues the deferred
 * kthread work, and returns IRQ_NONE for "forced" vsyncs so shared-IRQ
 * peers get a chance.
 *
 * NOTE(review): damaged extraction — gaps in the embedded original numbers
 * (835, 837-841, 845, 847-848, 850-851, 854, 862-866, 869-872, 874, 877,
 * 880-881, 884-885, 888-889, 892-893, 896-897, 900-901, 904-905, 908-909,
 * 914-915, 918-919, 922-924, 928-929, 932-933, 935, 939, 941, 944,
 * 946-948, 952-953, 955, 959, 961, 964-967, 970-971, 973, 976-977,
 * 979) mark lost lines: the local declarations (stat, combo, i,
 * vsync_force), the "if (0 == combo)" guard structure, continue/break
 * lines in the round-robin loops, irq_rr_idx updates and closing braces.
 * Surviving text preserved byte-for-byte; restore from upstream before
 * compiling.
 */
834 irqreturn_t
ivtv_irq_handler(int irq
, void *dev_id
)
836 struct ivtv
*itv
= (struct ivtv
*)dev_id
;
842 spin_lock(&itv
->dma_reg_lock
);
843 /* get contents of irq status register */
844 stat
= read_reg(IVTV_REG_IRQSTATUS
);
846 combo
= ~itv
->irqmask
& stat
;
849 if (combo
) write_reg(combo
, IVTV_REG_IRQSTATUS
);
852 /* The vsync interrupt is unusual and clears itself. If we
853 * took too long, we may have missed it. Do some checks
855 if (~itv
->irqmask
& IVTV_IRQ_DEC_VSYNC
) {
856 /* vsync is enabled, see if we're in a new field */
857 if ((itv
->last_vsync_field
& 1) !=
858 (read_reg(IVTV_REG_DEC_LINE_FIELD
) & 1)) {
859 /* New field, looks like we missed it */
860 IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
861 read_reg(IVTV_REG_DEC_LINE_FIELD
) >> 16);
867 /* No Vsync expected, wasn't for us */
868 spin_unlock(&itv
->dma_reg_lock
);
873 /* Exclude interrupts noted below from the output, otherwise the log is flooded with
875 if (combo
& ~0xff6d0400)
876 IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo
);
878 if (combo
& IVTV_IRQ_DEC_DMA_COMPLETE
) {
879 IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
882 if (combo
& IVTV_IRQ_DMA_READ
) {
883 ivtv_irq_dma_read(itv
);
886 if (combo
& IVTV_IRQ_ENC_DMA_COMPLETE
) {
887 ivtv_irq_enc_dma_complete(itv
);
890 if (combo
& IVTV_IRQ_ENC_PIO_COMPLETE
) {
891 ivtv_irq_enc_pio_complete(itv
);
894 if (combo
& IVTV_IRQ_DMA_ERR
) {
895 ivtv_irq_dma_err(itv
);
898 if (combo
& IVTV_IRQ_ENC_START_CAP
) {
899 ivtv_irq_enc_start_cap(itv
);
902 if (combo
& IVTV_IRQ_ENC_VBI_CAP
) {
903 ivtv_irq_enc_vbi_cap(itv
);
906 if (combo
& IVTV_IRQ_DEC_VBI_RE_INSERT
) {
907 ivtv_irq_dec_vbi_reinsert(itv
);
910 if (combo
& IVTV_IRQ_ENC_EOS
) {
911 IVTV_DEBUG_IRQ("ENC EOS\n");
912 set_bit(IVTV_F_I_EOS
, &itv
->i_flags
);
913 wake_up(&itv
->eos_waitq
);
916 if (combo
& IVTV_IRQ_DEC_DATA_REQ
) {
917 ivtv_irq_dec_data_req(itv
);
920 /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
921 if (~itv
->irqmask
& IVTV_IRQ_DEC_VSYNC
) {
925 if (combo
& IVTV_IRQ_ENC_VIM_RST
) {
926 IVTV_DEBUG_IRQ("VIM RST\n");
927 /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
930 if (combo
& IVTV_IRQ_DEC_AUD_MODE_CHG
) {
931 IVTV_DEBUG_INFO("Stereo mode changed\n");
/* Round-robin: start a pending DMA for some stream if none is active. */
934 if ((combo
& IVTV_IRQ_DMA
) && !test_bit(IVTV_F_I_DMA
, &itv
->i_flags
)) {
936 for (i
= 0; i
< IVTV_MAX_STREAMS
; i
++) {
937 int idx
= (i
+ itv
->irq_rr_idx
) % IVTV_MAX_STREAMS
;
938 struct ivtv_stream
*s
= &itv
->streams
[idx
];
940 if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING
, &s
->s_flags
))
942 if (s
->type
>= IVTV_DEC_STREAM_TYPE_MPG
)
943 ivtv_dma_dec_start(s
);
945 ivtv_dma_enc_start(s
);
949 if (i
== IVTV_MAX_STREAMS
&&
950 test_bit(IVTV_F_I_UDMA_PENDING
, &itv
->i_flags
))
951 ivtv_udma_start(itv
);
/* Same round-robin for streams waiting on a PIO transfer. */
954 if ((combo
& IVTV_IRQ_DMA
) && !test_bit(IVTV_F_I_PIO
, &itv
->i_flags
)) {
956 for (i
= 0; i
< IVTV_MAX_STREAMS
; i
++) {
957 int idx
= (i
+ itv
->irq_rr_idx
) % IVTV_MAX_STREAMS
;
958 struct ivtv_stream
*s
= &itv
->streams
[idx
];
960 if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING
, &s
->s_flags
))
962 if (s
->type
== IVTV_DEC_STREAM_TYPE_VBI
|| s
->type
< IVTV_DEC_STREAM_TYPE_MPG
)
963 ivtv_dma_enc_start(s
);
968 if (test_and_clear_bit(IVTV_F_I_HAVE_WORK
, &itv
->i_flags
)) {
969 queue_kthread_work(&itv
->irq_worker
, &itv
->irq_work
);
972 spin_unlock(&itv
->dma_reg_lock
);
974 /* If we've just handled a 'forced' vsync, it's safest to say it
975 * wasn't ours. Another device may have triggered it at just
978 return vsync_force
? IRQ_NONE
: IRQ_HANDLED
;
981 void ivtv_unfinished_dma(unsigned long arg
)
983 struct ivtv
*itv
= (struct ivtv
*)arg
;
985 if (!test_bit(IVTV_F_I_DMA
, &itv
->i_flags
))
987 IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS
), itv
->cur_dma_stream
);
989 write_reg(read_reg(IVTV_REG_DMASTATUS
) & 3, IVTV_REG_DMASTATUS
);
990 clear_bit(IVTV_F_I_UDMA
, &itv
->i_flags
);
991 clear_bit(IVTV_F_I_DMA
, &itv
->i_flags
);
992 itv
->cur_dma_stream
= -1;
993 wake_up(&itv
->dma_waitq
);