drivers/media/video/ivtv/ivtv-irq.c

/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

#define DMA_MAGIC_COOKIE 0x000001fe
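
/* The magic cookie appears to act as a start-of-data marker: it is written
   at the expected offset just before a transfer is scheduled (see
   stream_enc_dma_append) and searched for again in dma_post() to find out
   how far into the buffer the data actually starts. */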

static void ivtv_dma_dec_start(struct ivtv_stream *s);

static const int ivtv_stream_map[] = {
        IVTV_ENC_STREAM_TYPE_MPG,
        IVTV_ENC_STREAM_TYPE_YUV,
        IVTV_ENC_STREAM_TYPE_PCM,
        IVTV_ENC_STREAM_TYPE_VBI,
};
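
/* This table maps the stream number the encoder firmware reports in data[0]
   of the DMA mailbox to a stream type; ivtv_irq_enc_start_cap() below
   indexes it with values 0-2 (VBI captures arrive via a separate interrupt). */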

static void ivtv_pio_work_handler(struct ivtv *itv)
{
        struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
        struct ivtv_buffer *buf;
        int i = 0;

        IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
            s->v4l2dev == NULL || !ivtv_use_pio(s)) {
                itv->cur_pio_stream = -1;
                /* trigger PIO complete user interrupt */
                write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
                return;
        }
        IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
        list_for_each_entry(buf, &s->q_dma.list, list) {
                u32 size = s->sg_processing[i].size & 0x3ffff;

                /* Copy the data from the card to the buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                        memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
                }
                else {
                        memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
                }
                i++;
                if (i == s->sg_processing_size)
                        break;
        }
        write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
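
/* Deferred work: the handlers below run from a workqueue rather than from
   the interrupt handler itself, presumably because the PIO copies and the
   VBI/YUV post-processing are too slow for hard-irq context. */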
void ivtv_irq_work_handler(struct work_struct *work)
{
        struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

        DEFINE_WAIT(wait);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
                ivtv_pio_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
                ivtv_vbi_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
                ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma
   queue, and actually copy the data from the card to the buffers in case
   a PIO transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        u32 bytes_needed = 0;
        u32 offset, size;
        u32 UVoffset = 0, UVsize = 0;
        int skip_bufs = s->q_predma.buffers;
        int idx = s->sg_pending_size;
        int rc;

        /* sanity checks */
        if (s->v4l2dev == NULL) {
                IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
                return -1;
        }
        if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
                IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
                return -1;
        }

        /* determine offset, size and PTS for the various streams */
        switch (s->type) {
        case IVTV_ENC_STREAM_TYPE_MPG:
                offset = data[1];
                size = data[2];
                s->pending_pts = 0;
                break;

        case IVTV_ENC_STREAM_TYPE_YUV:
                offset = data[1];
                size = data[2];
                UVoffset = data[3];
                UVsize = data[4];
                s->pending_pts = ((u64) data[5] << 32) | data[6];
                break;

        case IVTV_ENC_STREAM_TYPE_PCM:
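                /* The PCM chunk apparently carries a 12-byte header in front
                   of the sample data (an assumption based on the offsets used
                   here: offset/size below skip 12 bytes and the PTS is read
                   back from offset - 12 and offset - 8). */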
                offset = data[1] + 12;
                size = data[2] - 12;
                s->pending_pts = read_dec(offset - 8) |
                        ((u64)(read_dec(offset - 12)) << 32);
                if (itv->has_cx23415)
                        offset += IVTV_DECODER_OFFSET;
                break;

        case IVTV_ENC_STREAM_TYPE_VBI:
                size = itv->vbi.enc_size * itv->vbi.fpi;
                offset = read_enc(itv->vbi.enc_start - 4) + 12;
                if (offset == 12) {
                        IVTV_DEBUG_INFO("VBI offset == 0\n");
                        return -1;
                }
                s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
                break;

        case IVTV_DEC_STREAM_TYPE_VBI:
                size = read_dec(itv->vbi.dec_start + 4) + 8;
                offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
                s->pending_pts = 0;
                offset += IVTV_DECODER_OFFSET;
                break;
        default:
                /* shouldn't happen */
                return -1;
        }

        /* if this is the start of the DMA then fill in the magic cookie */
        if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
                if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                        s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
                        write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
                }
                else {
                        s->pending_backup = read_enc(offset);
                        write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
                }
                s->pending_offset = offset;
        }

        bytes_needed = size;
        if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
                /* The size for the Y samples needs to be rounded upwards to a
                   multiple of the buf_size. The UV samples then start in the
                   next buffer. */
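                /* For example, assuming s->buf_size == 32768 and a 720x480 Y
                   plane (345600 bytes): bytes_needed rounds up to 11 * 32768
                   = 360448, so the UV samples start in the twelfth buffer. */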
                bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
                bytes_needed += UVsize;
        }

        IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
                ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

        rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
        if (rc < 0) { /* Insufficient buffers */
                IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
                        bytes_needed, s->name);
                return -1;
        }
        if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
                IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
                IVTV_WARN("Cause: the application is not reading fast enough.\n");
        }
        s->buffers_stolen = rc;

        /* got the buffers, now fill in sg_pending */
        buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
        memset(buf->buf, 0, 128);
        list_for_each_entry(buf, &s->q_predma.list, list) {
                if (skip_bufs-- > 0)
                        continue;
                s->sg_pending[idx].dst = buf->dma_handle;
                s->sg_pending[idx].src = offset;
                s->sg_pending[idx].size = s->buf_size;
                buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
                buf->dma_xfer_cnt = s->dma_xfer_cnt;

                s->q_predma.bytesused += buf->bytesused;
                size -= buf->bytesused;
                offset += s->buf_size;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);

                if (size == 0) { /* YUV */
                        /* process the UV section */
                        offset = UVoffset;
                        size = UVsize;
                }
                idx++;
        }
        s->sg_pending_size = idx;
        return 0;
}
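
/* A summary of what dma_post() does, based on the code below: find the real
   start of the DMA data via the magic cookie, restore the word the cookie
   overwrote, flag MPG/VBI buffers for byteswapping, and hand the filled
   buffers to the full queue (grooming VBI data first where needed). */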
static void dma_post(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf = NULL;
        struct list_head *p;
        u32 offset;
        u32 *u32buf;
        int x = 0;

        IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
                s->name, s->dma_offset);
        list_for_each(p, &s->q_dma.list) {
                buf = list_entry(p, struct ivtv_buffer, list);
                u32buf = (u32 *)buf->buf;

                /* Sync Buffer */
                ivtv_buf_sync_for_cpu(s, buf);

                if (x == 0 && ivtv_use_dma(s)) {
                        offset = s->dma_last_offset;
                        if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) {
                                for (offset = 0; offset < 64; offset++) {
                                        if (u32buf[offset] == DMA_MAGIC_COOKIE) {
                                                break;
                                        }
                                }
                                offset *= 4;
                                if (offset == 256) {
                                        IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
                                        offset = s->dma_last_offset;
                                }
                                if (s->dma_last_offset != offset)
                                        IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
                                s->dma_last_offset = offset;
                        }
                        if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                                                 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                                write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
                        }
                        else {
                                write_enc_sync(0, s->dma_offset);
                        }
                        if (offset) {
                                buf->bytesused -= offset;
                                memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
                        }
                        *u32buf = cpu_to_le32(s->dma_backup);
                }
                x++;
                /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
                if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
                    s->type == IVTV_ENC_STREAM_TYPE_VBI)
                        buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
        }
        if (buf)
                buf->bytesused += s->dma_last_offset;
        if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                list_for_each_entry(buf, &s->q_dma.list, list) {
                        /* Parse and Groom VBI Data */
                        s->q_dma.bytesused -= buf->bytesused;
                        ivtv_process_vbi_data(itv, buf, 0, s->type);
                        s->q_dma.bytesused += buf->bytesused;
                }
                if (s->id == -1) {
                        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
                        return;
                }
        }
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
        if (s->id != -1)
                wake_up(&s->waitq);
}
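
/* Build the decoder (host-to-card) scatter-gather list from the predma
   queue and start the transfer, or mark it pending if a DMA is already in
   flight. */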
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        u32 y_size = itv->params.height * itv->params.width;
        u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
        int y_done = 0;
        int bytes_written = 0;
        unsigned long flags = 0;
        int idx = 0;

        IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
        list_for_each_entry(buf, &s->q_predma.list, list) {
                /* YUV UV Offset from Y Buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
                        offset = uv_offset;
                        y_done = 1;
                }
                s->sg_pending[idx].src = buf->dma_handle;
                s->sg_pending[idx].dst = offset;
                s->sg_pending[idx].size = buf->bytesused;

                offset += buf->bytesused;
                bytes_written += buf->bytesused;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);
                idx++;
        }
        s->sg_pending_size = idx;

        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        if (lock)
                spin_lock_irqsave(&itv->dma_reg_lock, flags);
        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                ivtv_dma_dec_start(s);
        }
        else {
                set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
        if (lock)
                spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
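
/* The two helpers below program a single scatter-gather element into the
   card and kick the engine: bit 1 of IVTV_REG_DMAXFER starts an encoder
   transfer and bit 0 a decoder transfer. The 0x80000000 flag OR-ed into the
   size word is presumably consumed by the DMA engine itself. */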
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
        int i;

        IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
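
        /* The extra 256 bytes appended below appear to match the cookie
           search in dma_post(), which scans the first 256 bytes of the
           buffer for the shifted start of the data. */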
        if (ivtv_use_dma(s))
                s->sg_pending[s->sg_pending_size - 1].size += 256;

        /* If this is an MPEG stream, and VBI data is also pending, then append the
           VBI DMA to the MPEG DMA and transfer both sets of data at once.

           VBI DMA is a second class citizen compared to MPEG and mixing them together
           will confuse the firmware (the end of a VBI DMA is seen as the end of a
           MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
           sure we only use the MPEG DMA to transfer the VBI DMA if both are in
           use. This way no conflicts occur. */
        clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
            s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
                ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
                if (ivtv_use_dma(s_vbi))
                        s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
                for (i = 0; i < s_vbi->sg_pending_size; i++) {
                        s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
                }
                s_vbi->dma_offset = s_vbi->pending_offset;
                s_vbi->sg_pending_size = 0;
                s_vbi->dma_xfer_cnt++;
                set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
                IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
        }

        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;
        s->dma_offset = s->pending_offset;
        s->dma_backup = s->pending_backup;
        s->dma_pts = s->pending_pts;

        if (ivtv_use_pio(s)) {
                set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                set_bit(IVTV_F_I_PIO, &itv->i_flags);
                itv->cur_pio_stream = s->type;
        }
        else {
                itv->dma_retries = 0;
                ivtv_dma_enc_start_xfer(s);
                set_bit(IVTV_F_I_DMA, &itv->i_flags);
                itv->cur_dma_stream = s->type;
                itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
                add_timer(&itv->dma_timer);
        }
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;

        IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
        itv->dma_retries = 0;
        ivtv_dma_dec_start_xfer(s);
        set_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = s->type;
        itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
        add_timer(&itv->dma_timer);
}

static void ivtv_irq_dma_read(struct ivtv *itv)
{
        struct ivtv_stream *s = NULL;
        struct ivtv_buffer *buf;
        int hw_stream_type = 0;

        IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }

        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                s = &itv->streams[itv->cur_dma_stream];
                ivtv_stream_sync_for_cpu(s);

                if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
                        IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
                                read_reg(IVTV_REG_DMASTATUS),
                                s->sg_processed, s->sg_processing_size, itv->dma_retries);
                        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                        if (itv->dma_retries == 3) {
                                /* Too many retries, give up on this frame */
                                itv->dma_retries = 0;
                                s->sg_processed = s->sg_processing_size;
                        }
                        else {
                                /* Retry, starting with the first xfer segment.
                                   Just retrying the current segment is not sufficient. */
                                s->sg_processed = 0;
                                itv->dma_retries++;
                        }
                }
                if (s->sg_processed < s->sg_processing_size) {
                        /* DMA next buffer */
                        ivtv_dma_dec_start_xfer(s);
                        return;
                }
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
                        hw_stream_type = 2;
                IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

                /* For some reason we must kick the firmware here, as in PIO
                   mode. This apparently tells the firmware we are done and
                   reports the size of the transfer so it can calculate what
                   we need next. We could probably do this part ourselves, but
                   we would have to fully compute the transfer info ourselves
                   and not use interrupts.
                 */
                ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
                                hw_stream_type);

                /* Free last DMA call */
                while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
                        ivtv_buf_sync_for_cpu(s, buf);
                        ivtv_enqueue(s, buf, &s->q_free);
                }
                wake_up(&s->waitq);
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
        if (itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }
        s = &itv->streams[itv->cur_dma_stream];
        ivtv_stream_sync_for_cpu(s);

        if (data[0] & 0x18) {
                IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
                        s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
                write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                if (itv->dma_retries == 3) {
                        /* Too many retries, give up on this frame */
                        itv->dma_retries = 0;
                        s->sg_processed = s->sg_processing_size;
                }
                else {
                        /* Retry, starting with the first xfer segment.
                           Just retrying the current segment is not sufficient. */
                        s->sg_processed = 0;
                        itv->dma_retries++;
                }
        }
        if (s->sg_processed < s->sg_processing_size) {
                /* DMA next buffer */
                ivtv_dma_enc_start_xfer(s);
                return;
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        dma_post(s);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        s->sg_processing_size = 0;
        s->sg_processed = 0;
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
        struct ivtv_stream *s;

        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
                itv->cur_pio_stream = -1;
                return;
        }
        s = &itv->streams[itv->cur_pio_stream];
        IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        itv->cur_pio_stream = -1;
        dma_post(s);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
        else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
        else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];

        del_timer(&itv->dma_timer);
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
                read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
            itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
                struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

                /* retry */
                if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                        ivtv_dma_dec_start(s);
                else
                        ivtv_dma_enc_start(s);
                return;
        }
        if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                ivtv_udma_start(itv);
                return;
        }
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* Get DMA destination and size arguments from card */
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
        IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

        if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
                IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
                        data[0], data[1], data[2]);
                return;
        }
        s = &itv->streams[ivtv_stream_map[data[0]]];
        if (!stream_enc_dma_append(s, data)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
        struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
        s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
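
        /* Note that data[] is never filled in here: for the VBI stream types
           stream_enc_dma_append() computes offset and size from itv->vbi
           rather than from the mailbox arguments. */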

        /* If more than two VBI buffers are pending, then
           clear the old ones and start with this new one.
           This can happen during transition stages when MPEG capturing is
           started, but the first interrupts haven't arrived yet. During
           that period VBI requests can accumulate without being able to
           DMA the data. Since at most four VBI DMA buffers are available,
           we just drop the old requests when there are already three
           requests queued. */
        if (s->sg_pending_size > 2) {
                struct ivtv_buffer *buf;
                list_for_each_entry(buf, &s->q_predma.list, list)
                        ivtv_buf_sync_for_cpu(s, buf);
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
                s->sg_pending_size = 0;
        }
        /* if we can append the data, and the MPEG stream isn't capturing,
           then start a DMA request for just the VBI data. */
        if (!stream_enc_dma_append(s, data) &&
            !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

        IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
        if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
            !stream_enc_dma_append(s, data)) {
                set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* YUV or MPG */
        ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

        if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
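                /* One 4:2:0 frame: the Y plane plus the quarter-size U and V
                   planes gives width * height * 3 / 2 bytes. */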
                itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
                itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
        }
        else {
                itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
                itv->dma_data_req_offset = data[1];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
        }
        IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
                itv->dma_data_req_offset, itv->dma_data_req_size);
        if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
                set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
        }
        else {
                clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
                ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
                ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
        }
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
        /* The vsync interrupt is unusual in that it won't clear until
         * the end of the first line for the current field, at which
         * point it clears itself. This can result in repeated vsync
         * interrupts, or a missed vsync. Read some of the registers
         * to determine the line being displayed and ensure we handle
         * one vsync per frame.
         */
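        /* Register 0x28c0 appears to hold the display position: bit 0 is the
           field currently being displayed and the upper 16 bits the line (the
           missed-vsync check in ivtv_irq_handler prints it that way). */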
        unsigned int frame = read_reg(0x28c0) & 1;
        int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

        if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

        if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
             ((itv->last_vsync_field & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
            (frame != (itv->last_vsync_field & 1) && !itv->yuv_info.frame_interlaced)) {
                int next_dma_frame = last_dma_frame;

                if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
                        if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
                                next_dma_frame = (next_dma_frame + 1) & 0x3;
                                atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
                                itv->yuv_info.fields_lapsed = -1;
                        }
                }
        }
        if (frame != (itv->last_vsync_field & 1)) {
                struct ivtv_stream *s = ivtv_get_output_stream(itv);

                itv->last_vsync_field += 1;
                if (frame == 0) {
                        clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
                        clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                else {
                        set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
                        set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
                        wake_up(&itv->event_waitq);
                }
                wake_up(&itv->vsync_waitq);
                if (s)
                        wake_up(&s->waitq);

                /* Send VBI to saa7127 */
                if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
                    test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
                    test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
                    test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
                        set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
                        set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                }

                /* Check if we need to update the yuv registers */
                if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
                        if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
                                last_dma_frame = (last_dma_frame - 1) & 3;

                        if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
                                itv->yuv_info.update_frame = last_dma_frame;
                                itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
                                itv->yuv_info.yuv_forced_update = 0;
                                set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
                                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                        }
                }

                itv->yuv_info.fields_lapsed++;
        }
}

#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
        struct ivtv *itv = (struct ivtv *)dev_id;
        u32 combo;
        u32 stat;
        int i;
        u8 vsync_force = 0;

        spin_lock(&itv->dma_reg_lock);
        /* get contents of irq status register */
        stat = read_reg(IVTV_REG_IRQSTATUS);

        combo = ~itv->irqmask & stat;

        /* Clear out IRQ */
        if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

        if (0 == combo) {
                /* The vsync interrupt is unusual and clears itself. If we
                 * took too long, we may have missed it. Do some checks
                 */
                if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                        /* vsync is enabled, see if we're in a new field */
                        if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
                                /* New field, looks like we missed it */
                                IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
                                vsync_force = 1;
                        }
                }

                if (!vsync_force) {
                        /* No Vsync expected, wasn't for us */
                        spin_unlock(&itv->dma_reg_lock);
                        return IRQ_NONE;
                }
        }

        /* Exclude interrupts noted below from the output, otherwise the log is flooded with
           these messages */
        if (combo & ~0xff6d0400)
                IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

        if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
                IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
        }

        if (combo & IVTV_IRQ_DMA_READ) {
                ivtv_irq_dma_read(itv);
        }

        if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
                ivtv_irq_enc_dma_complete(itv);
        }

        if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
                ivtv_irq_enc_pio_complete(itv);
        }

        if (combo & IVTV_IRQ_DMA_ERR) {
                ivtv_irq_dma_err(itv);
        }

        if (combo & IVTV_IRQ_ENC_START_CAP) {
                ivtv_irq_enc_start_cap(itv);
        }

        if (combo & IVTV_IRQ_ENC_VBI_CAP) {
                ivtv_irq_enc_vbi_cap(itv);
        }

        if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
                ivtv_irq_dec_vbi_reinsert(itv);
        }

        if (combo & IVTV_IRQ_ENC_EOS) {
                IVTV_DEBUG_IRQ("ENC EOS\n");
                set_bit(IVTV_F_I_EOS, &itv->i_flags);
                wake_up(&itv->eos_waitq);
        }

        if (combo & IVTV_IRQ_DEC_DATA_REQ) {
                ivtv_irq_dec_data_req(itv);
        }

        /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
        if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                ivtv_irq_vsync(itv);
        }

        if (combo & IVTV_IRQ_ENC_VIM_RST) {
                IVTV_DEBUG_IRQ("VIM RST\n");
                /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
        }

        if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
                IVTV_DEBUG_INFO("Stereo mode changed\n");
        }
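
        /* No DMA in progress: start the next pending transfer. The
           irq_rr_idx round-robin index is advanced each time, apparently so
           that no single stream can starve the others when several have
           DMA pending. */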
        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
                                continue;
                        if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_dec_start(s);
                        else
                                ivtv_dma_enc_start(s);
                        break;
                }
                if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
                        ivtv_udma_start(itv);
                }
        }

        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
                                continue;
                        if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_enc_start(s);
                        break;
                }
        }

        if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
                queue_work(itv->irq_work_queues, &itv->irq_work_queue);
        }

        spin_unlock(&itv->dma_reg_lock);

        /* If we've just handled a 'forced' vsync, it's safest to say it
         * wasn't ours. Another device may have triggered it at just
         * the right time.
         */
        return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
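
/* Presumably the itv->dma_timer callback: a scheduled transfer failed to
   complete within the 100 ms timeout armed when it was started, so reset
   the DMA status and give up on it. */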
void ivtv_unfinished_dma(unsigned long arg)
{
        struct ivtv *itv = (struct ivtv *)arg;

        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
                return;
        IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}