drivers/media/video/ivtv/ivtv-irq.c
/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

#define DMA_MAGIC_COOKIE 0x000001fe
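
/* The magic cookie is written over the first word of a pending transfer in
   stream_enc_dma_append() and searched for again in dma_post() to locate the
   real start of the data; the word it overwrites is saved in pending_backup
   and restored once the transfer completes. */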
static void ivtv_dma_dec_start(struct ivtv_stream *s);

static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->v4l2dev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
void ivtv_irq_work_handler(struct work_struct *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

	DEFINE_WAIT(wait);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}
/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}
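
/* Post-process a completed PIO/DMA transfer: locate the DMA_MAGIC_COOKIE to
   find where the data really starts, restore the word that was backed up in
   stream_enc_dma_append(), flag MPG/VBI buffers for byteswapping, and move
   the finished buffers from q_dma to q_full (or back to q_free for internal
   VBI data). */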
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	u32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (u32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) {
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->id == -1) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->id != -1)
		wake_up(&s->waitq);
}
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
					buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
					buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
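
/* Load the next pending segment into the hardware SG descriptor and kick the
   encoder DMA engine (bit 0x02 of IVTV_REG_DMAXFER); ivtv_dma_dec_start_xfer()
   below is the decoder counterpart using IVTV_REG_DECDMAADDR and bit 0x01. */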
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
}
static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
}
/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
		itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
		add_timer(&itv->dma_timer);
	}
}
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
	add_timer(&itv->dma_timer);
}
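
/* Handle the decoder "DMA read" completion interrupt: check DMASTATUS for
   errors, retry the whole transfer up to three times, tell the firmware how
   much was transferred, then recycle the buffers back to q_free and clear
   the DMA-in-progress state. */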
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
		del_timer(&itv->dma_timer);
		return;
	}

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	del_timer(&itv->dma_timer);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
	if (itv->cur_dma_stream < 0) {
		del_timer(&itv->dma_timer);
		return;
	}
	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	del_timer(&itv->dma_timer);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
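
/* The encoder reports that captured data is ready: data[0] selects the stream
   via ivtv_stream_map[], and (for most stream types) data[1]/data[2] carry the
   offset and size in card memory that stream_enc_dma_append() will use to
   queue a DMA or PIO transfer. */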
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->sg_pending_size > 2) {
		struct ivtv_buffer *buf;
		list_for_each_entry(buf, &s->q_predma.list, list)
			ivtv_buf_sync_for_cpu(s, buf);
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->sg_pending_size = 0;
	}
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
			!test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
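
/* The decoder requests more MPEG or YUV data: work out how much it wants,
   then either mark the stream as needing data or move buffers from q_full to
   q_predma and prepare a DMA transfer of the requested size into decoder
   memory. */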
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		itv->dma_data_req_size =
			1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(0x28c0) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
		(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if ((yi->yuv_forced_update || f->update) && last_dma_frame != -1) {
			if (!f->update) {
				last_dma_frame = (u8)(last_dma_frame - 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)
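
/* Main interrupt handler: read and clear IVTV_REG_IRQSTATUS, dispatch each
   pending cause to its helper above, then round-robin start any pending
   DMA/PIO requests and queue the deferred work handler if needed. */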
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
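
/* Watchdog for stalled transfers: the 100 ms dma_timer armed when a DMA is
   started presumably lands here (the timer is registered elsewhere in the
   driver). If the DMA-in-progress flag is still set, log the timeout, reset
   the DMA state and wake up any waiters. */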
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}