/*
 *  Driver for the Conexant CX23885 PCIe bridge
 *
 *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <linux/firmware.h>

#include "cx23885.h"
#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
#include "cx23885-av.h"
#include "cx23885-input.h"

MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
	} while (0)
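/*
 * Note on dprintk() above: it expects a local "dev" pointer to be in scope
 * and only emits output when the "debug" module parameter is at least
 * "level", e.g. loading the module with debug=7 enables the most verbose
 * interrupt tracing used later in this file.
 */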
static unsigned int cx23885_devcount;

#define NO_SYNC_LINE (-1U)

/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 * CX23887 Assumptions
 * 1 line = 16 bytes of CDT
 * cmds size = 80
 * cdt size = 16 * linesize
 * iqsize = 64
 * maxlines = 6
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */
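/*
 * Worked example from the assumptions above (illustration only, not from
 * the datasheet): the instruction-queue window 0x10500..0x108bf spans
 * 0x3c0 = 960 bytes, i.e. exactly 15 channels * iqsize (15 * 64).  The CDT
 * entries that the channel tables below point at are 16 bytes per line, so
 * a 6-line channel needs 6 * 16 = 96 bytes of CDT.
 */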
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	if (v)
		cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}

void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}

void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
	return v;
}
static int cx23885_risc_decode(u32 risc)
{
	static char *instr[16] = {
		[RISC_SYNC    >> 28] = "sync",
		[RISC_WRITE   >> 28] = "write",
		[RISC_WRITEC  >> 28] = "writec",
		[RISC_READ    >> 28] = "read",
		[RISC_READC   >> 28] = "readc",
		[RISC_JUMP    >> 28] = "jump",
		[RISC_SKIP    >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	static int incr[16] = {
		[RISC_WRITE   >> 28] = 3,
		[RISC_JUMP    >> 28] = 3,
		[RISC_SKIP    >> 28] = 1,
		[RISC_SYNC    >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	static char *bits[] = {
		"12",   "13",   "14",   "resync",
		"cnt0", "cnt1", "18",   "19",
		"20",   "21",   "22",   "23",
		"irq1", "irq2", "eol",  "sol",
	};
	int i;

	printk("0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			printk(" %s", bits[i]);
	printk(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
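/*
 * For reference when reading the dumps produced above: each RISC
 * instruction word keeps its opcode in bits 31:28, assorted flag bits
 * (sol/eol, irq1/irq2, counter control, resync) in bits 27:12 and a
 * 12-bit byte count in bits 11:0, which is exactly how
 * cx23885_risc_decode() picks it apart.
 */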
void cx23885_wakeup(struct cx23885_tsport *port,
		    struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;
	int bc;

	for (bc = 0;; bc++) {
		if (list_empty(&q->active))
			break;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, vb.queue);

		/* count comes from the hw and is 16bit wide --
		 * this trick handles wrap-arounds correctly for
		 * up to 32767 buffers in flight... */
		if ((s16) (count - buf->count) < 0)
			break;

		do_gettimeofday(&buf->vb.ts);
		dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
			count, buf->count);
		buf->vb.state = VIDEOBUF_DONE;
		list_del(&buf->vb.queue);
		wake_up(&buf->vb.done);
	}
	if (list_empty(&q->active))
		del_timer(&q->timeout);
	else
		mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
	if (bc != 1)
		printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
		       __func__, bc);
}
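/*
 * Illustration of the wrap-around trick in the comment above: with a
 * 16-bit hardware counter, count = 0x0002 and buf->count = 0xfffe give
 * (s16)(0x0002 - 0xfffe) = 4 >= 0, so the buffer is still treated as
 * complete even though the raw counter has already wrapped past it.
 */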
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	cx_write(8 + 4, 8);
	cx_write(8 + 8, 0);

	/* write CDT */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
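/*
 * Rough sketch of the CMDS block this function programs (inferred from the
 * writes above, not from vendor documentation):
 *
 *   cmds_start + 0x00  initial RISC program address (or the jump stub at 8)
 *   cmds_start + 0x04  upper 32 bits of that address (always 0 here)
 *   cmds_start + 0x08  CDT base
 *   cmds_start + 0x0c  CDT size in qwords (lines * 16 / 8)
 *   cmds_start + 0x10  instruction queue base (ctrl_start)
 *   cmds_start + 0x14  instruction queue size in dwords (64 / 4)
 */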
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}
static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct btcx_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
	       dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		printk(KERN_INFO "%s: %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
			       dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);
}
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	mdelay(100);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	return 0;
}

static int get_resources(struct cx23885_dev *dev)
{
	if (request_mem_region(pci_resource_start(dev->pci, 0),
			       pci_resource_len(dev->pci, 0),
			       dev->name))
		return 0;

	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
	       dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));

	return -EBUSY;
}
static void cx23885_timeout(unsigned long data);
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
			 u32 reg, u32 mask, u32 value);

static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val = 0x0;
	port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	INIT_LIST_HEAD(&port->mpegq.queued);
	port->mpegq.timeout.function = cx23885_timeout;
	port->mpegq.timeout.data = (unsigned long)port;
	init_timer(&port->mpegq.timeout);

	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* By default a single frontend is attached to this tsport,
	 * keeping the -dvb.c code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt = VID_B_GPCNT;
		port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
		port->reg_dma_ctl = VID_B_DMA_CTL;
		port->reg_lngth = VID_B_LNGTH;
		port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_B_GEN_CTL;
		port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
		port->reg_sop_status = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_B_VLD_MISC;
		port->reg_ts_clk_en = VID_B_TS_CLK_EN;
		port->reg_src_sel = VID_B_SRC_SEL;
		port->reg_ts_int_msk = VID_B_INT_MSK;
		port->reg_ts_int_stat = VID_B_INT_STAT;
		port->sram_chno = SRAM_CH03; /* VID_B */
		port->pci_irqmask = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt = VID_C_GPCNT;
		port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
		port->reg_dma_ctl = VID_C_DMA_CTL;
		port->reg_lngth = VID_C_LNGTH;
		port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_C_GEN_CTL;
		port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
		port->reg_sop_status = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_C_VLD_MISC;
		port->reg_ts_clk_en = VID_C_TS_CLK_EN;
		port->reg_src_sel = 0;
		port->reg_ts_int_msk = VID_C_INT_MSK;
		port->reg_ts_int_stat = VID_C_INT_STAT;
		port->sram_chno = SRAM_CH06; /* VID_C */
		port->pci_irqmask = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
			     port->reg_dma_ctl, port->dma_ctl_val, 0x00);

	return 0;
}
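/*
 * Port numbering used throughout this file (as set up above): portno 1 is
 * the VID_B transport ("TS1", SRAM_CH03, PCI IRQ bit 1) and portno 2 is the
 * VID_C transport ("TS2", SRAM_CH06, PCI IRQ bit 2).  VID_A is the analog
 * video path and is registered separately via cx23885_video_register().
 */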
static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		/* cx23885 */
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		/* CX23885-12Z */
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			/* CX23885-14Z */
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
		       __func__, dev->hwrevision);
	else
		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}
/* Find the first v4l2_subdev member of the group id in hw */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *result = NULL;
	struct v4l2_subdev *sd;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id == hw) {
			result = sd;
			break;
		}
	}
	spin_unlock(&dev->v4l2_dev.lock);
	return result;
}
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume a default */
		dev->bridge = CX23885_BRIDGE_887;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 25000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	dev->pci_bus = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		printk(KERN_ERR "CORE %s No more PCIe resources for "
		       "subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
	       dev->name, dev->pci->subsystem_vendor,
	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
	       dev->board, card[dev->nr] == dev->board ?
	       "insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, core, s_power, 0);
	cx23885_ir_init(dev);

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			printk(KERN_ERR "%s() Failed to register analog "
			       "video adapters on VID_A\n", __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
			       "%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			printk(KERN_ERR
			       "%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
			       "%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	return 0;
}
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg++;
		}

		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg++;
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg++;
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
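/*
 * Each write emitted above is a three-dword instruction: the RISC_WRITE
 * opcode with sol/eol flags and a byte count, the lower 32 bits of the
 * PCI(e) target address, and the (always zero here) upper 32 bits.  A
 * scanline that straddles a scatter-gather boundary therefore becomes
 * several writes, only the last of which carries RISC_EOL.
 */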
int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;
	int rc;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 2;
	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
int cx23885_risc_databuffer(struct pci_dev *pci,
			    struct btcx_riscmem *risc,
			    struct scatterlist *sglist,
			    unsigned int bpl,
			    unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;
	int rc;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Here
	   there is no padding and no sync.  First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 1;

	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
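/*
 * Worked sizing example for the estimate above (illustrative numbers only):
 * a transport buffer with bpl = 188 * 8 = 1504 bytes and lines = 32 gives
 * 1 + (1504 * 32) / 4096 + 32 = 44 instructions, plus one extra for the
 * trailing jump, i.e. 45 * 12 = 540 bytes of RISC memory, assuming the
 * usual 4 KiB PAGE_SIZE.
 */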
int cx23885_risc_vbibuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;
	int rc;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 2;
	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
	if (rc < 0)
		return rc;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 6,
					bpl, padding, lines, 0);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x207,
					bpl, padding, lines, 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
			 u32 reg, u32 mask, u32 value)
{
	__le32 *rp;
	int rc;

	rc = btcx_riscmem_alloc(pci, risc, 4*16);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	*(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
	*(rp++) = cpu_to_le32(reg);
	*(rp++) = cpu_to_le32(value);
	*(rp++) = cpu_to_le32(mask);
	*(rp++) = cpu_to_le32(RISC_JUMP);
	*(rp++) = cpu_to_le32(risc->dma);
	*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	return 0;
}
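/*
 * The "stopper" built above is a tiny self-contained RISC program: a
 * writecr that applies (value, mask) to the given register and raises IRQ2,
 * followed by a jump back to its own start.  Buffers queued in
 * cx23885_buf_queue() terminate with a jump to this program, so when the
 * last buffer completes the port's DMA control bits are cleared (see the
 * cx23885_risc_stopper() call in cx23885_init_tsport()) and the driver is
 * notified via the RISCI2 interrupt.
 */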
void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);

	BUG_ON(in_interrupt());
	videobuf_waiton(q, &buf->vb, 0, 0);
	videobuf_dma_unmap(q->dev, dma);
	videobuf_dma_free(dma);
	btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
static int cx23885_start_dma(struct cx23885_tsport *port,
			     struct cx23885_dmaqueue *q,
			     struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		buf->vb.width, buf->vb.height, buf->vb.field);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, buf->vb.width);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
	    (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 1;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1; /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* FIXME and these two registers should be documented. */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}
static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);

		/* Set TS1_OE */
		reg = reg | 0x1;

		/* clear TS1_SOP_OE and TS1_OE_HI */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}
int cx23885_restart_queue(struct cx23885_tsport *port,
			  struct cx23885_dmaqueue *q)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;

	dprintk(5, "%s()\n", __func__);
	if (list_empty(&q->active)) {
		struct cx23885_buffer *prev;
		prev = NULL;

		dprintk(5, "%s() queue is empty\n", __func__);

		for (;;) {
			if (list_empty(&q->queued))
				return 0;
			buf = list_entry(q->queued.next, struct cx23885_buffer,
					 vb.queue);
			if (NULL == prev) {
				list_del(&buf->vb.queue);
				list_add_tail(&buf->vb.queue, &q->active);
				cx23885_start_dma(port, q, buf);
				buf->vb.state = VIDEOBUF_ACTIVE;
				buf->count = q->count++;
				mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
				dprintk(5, "[%p/%d] restart_queue - f/active\n",
					buf, buf->vb.i);

			} else if (prev->vb.width == buf->vb.width &&
				   prev->vb.height == buf->vb.height &&
				   prev->fmt == buf->fmt) {
				list_del(&buf->vb.queue);
				list_add_tail(&buf->vb.queue, &q->active);
				buf->vb.state = VIDEOBUF_ACTIVE;
				buf->count = q->count++;
				prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
				/* 64 bit bits 63-32 */
				prev->risc.jmp[2] = cpu_to_le32(0);
				dprintk(5, "[%p/%d] restart_queue - m/active\n",
					buf, buf->vb.i);
			} else {
				return 0;
			}
			prev = buf;
		}
		return 0;
	}

	buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
	dprintk(2, "restart_queue [%p/%d]: restart dma\n",
		buf, buf->vb.i);
	cx23885_start_dma(port, q, buf);
	list_for_each_entry(buf, &q->active, vb.queue)
		buf->count = q->count++;
	mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
	return 0;
}

/* ------------------------------------------------------------------ */
int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
			struct cx23885_buffer *buf, enum v4l2_field field)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	int rc;

	dprintk(1, "%s: %p\n", __func__, buf);
	if (0 != buf->vb.baddr && buf->vb.bsize < size)
		return -EINVAL;

	if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
		buf->vb.width = port->ts_packet_size;
		buf->vb.height = port->ts_packet_count;
		buf->vb.size = size;
		buf->vb.field = field /*V4L2_FIELD_TOP*/;

		rc = videobuf_iolock(q, &buf->vb, NULL);
		if (0 != rc)
			goto fail;
		cx23885_risc_databuffer(dev->pci, &buf->risc,
					videobuf_to_dma(&buf->vb)->sglist,
					buf->vb.width, buf->vb.height, 0);
	}
	buf->vb.state = VIDEOBUF_PREPARED;
	return 0;

 fail:
	cx23885_free_buffer(q, buf);
	return rc;
}
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;

	/* add jump to stopper */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&cx88q->active)) {
		dprintk(1, "queue is empty - first active\n");
		list_add_tail(&buf->vb.queue, &cx88q->active);
		cx23885_start_dma(port, cx88q, buf);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = cx88q->count++;
		mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.i, __func__);
	} else {
		dprintk(1, "queue is not empty - append to active\n");
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  vb.queue);
		list_add_tail(&buf->vb.queue, &cx88q->active);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = cx88q->count++;
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.i, __func__);
	}
}
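/*
 * Queueing model used above: every buffer's RISC program ends with a jump.
 * A freshly queued buffer initially jumps to the port's stopper program;
 * if another buffer is already active, the previous buffer's jump is
 * re-pointed at the new buffer's program instead, so the hardware chains
 * from one buffer to the next without CPU involvement and only falls into
 * the stopper when the queue runs dry.
 */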
/* ----------------------------------------------------------- */

static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
			      int restart)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 vb.queue);
		list_del(&buf->vb.queue);
		buf->vb.state = VIDEOBUF_ERROR;
		wake_up(&buf->vb.done);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
	}
	if (restart) {
		dprintk(1, "restarting queue\n");
		cx23885_restart_queue(port, q);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}

void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;

	dprintk(1, "%s()\n", __func__);
	del_timer_sync(&q->timeout);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel", 0);
}
static void cx23885_timeout(unsigned long data)
{
	struct cx23885_tsport *port = (struct cx23885_tsport *)data;
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);

	if (debug > 5)
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	cx23885_stop_dma(port);
	do_cancel_buffers(port, "timeout", 1);
}
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
	    (status & VID_B_MSK_OPC_ERR) ||
	    (status & VID_B_MSK_VBI_OPC_ERR) ||
	    (status & VID_B_MSK_SYNC) ||
	    (status & VID_B_MSK_VBI_SYNC) ||
	    (status & VID_B_MSK_OF) ||
	    (status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
		       "= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, " VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, " VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, " VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, " VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, " VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, " VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, " VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	} else if (status & VID_B_MSK_RISCI2) {
		dprintk(7, " VID_B_MSK_RISCI2\n");
		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
	    (status & VID_BC_MSK_BAD_PKT) ||
	    (status & VID_BC_MSK_SYNC) ||
	    (status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	} else if (status & VID_BC_MSK_RISCI2) {

		dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);

		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1792 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1794 struct cx23885_dev *dev = dev_id;
1795 struct cx23885_tsport *ts1 = &dev->ts1;
1796 struct cx23885_tsport *ts2 = &dev->ts2;
1797 u32 pci_status, pci_mask;
1798 u32 vida_status, vida_mask;
1799 u32 audint_status, audint_mask;
1800 u32 ts1_status, ts1_mask;
1801 u32 ts2_status, ts2_mask;
1802 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1803 int audint_count = 0;
1804 bool subdev_handled;
1806 pci_status = cx_read(PCI_INT_STAT);
1807 pci_mask = cx23885_irq_get_mask(dev);
1808 vida_status = cx_read(VID_A_INT_STAT);
1809 vida_mask = cx_read(VID_A_INT_MSK);
1810 audint_status = cx_read(AUDIO_INT_INT_STAT);
1811 audint_mask = cx_read(AUDIO_INT_INT_MSK);
1812 ts1_status = cx_read(VID_B_INT_STAT);
1813 ts1_mask = cx_read(VID_B_INT_MSK);
1814 ts2_status = cx_read(VID_C_INT_STAT);
1815 ts2_mask = cx_read(VID_C_INT_MSK);
1817 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1818 goto out;
1820 vida_count = cx_read(VID_A_GPCNT);
1821 audint_count = cx_read(AUD_INT_A_GPCNT);
1822 ts1_count = cx_read(ts1->reg_gpcnt);
1823 ts2_count = cx_read(ts2->reg_gpcnt);
1824 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1825 pci_status, pci_mask);
1826 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1827 vida_status, vida_mask, vida_count);
1828 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1829 audint_status, audint_mask, audint_count);
1830 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1831 ts1_status, ts1_mask, ts1_count);
1832 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1833 ts2_status, ts2_mask, ts2_count);
1835 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1836 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1837 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1838 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1839 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
1840 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1842 if (pci_status & PCI_MSK_RISC_RD)
1843 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1844 PCI_MSK_RISC_RD);
1846 if (pci_status & PCI_MSK_RISC_WR)
1847 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1848 PCI_MSK_RISC_WR);
1850 if (pci_status & PCI_MSK_AL_RD)
1851 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1852 PCI_MSK_AL_RD);
1854 if (pci_status & PCI_MSK_AL_WR)
1855 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1856 PCI_MSK_AL_WR);
1858 if (pci_status & PCI_MSK_APB_DMA)
1859 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1860 PCI_MSK_APB_DMA);
1862 if (pci_status & PCI_MSK_VID_C)
1863 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1864 PCI_MSK_VID_C);
1866 if (pci_status & PCI_MSK_VID_B)
1867 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1868 PCI_MSK_VID_B);
1870 if (pci_status & PCI_MSK_VID_A)
1871 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1872 PCI_MSK_VID_A);
1874 if (pci_status & PCI_MSK_AUD_INT)
1875 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1876 PCI_MSK_AUD_INT);
1878 if (pci_status & PCI_MSK_AUD_EXT)
1879 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1880 PCI_MSK_AUD_EXT);
1882 if (pci_status & PCI_MSK_GPIO0)
1883 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1884 PCI_MSK_GPIO0);
1886 if (pci_status & PCI_MSK_GPIO1)
1887 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1888 PCI_MSK_GPIO1);
1890 if (pci_status & PCI_MSK_AV_CORE)
1891 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1892 PCI_MSK_AV_CORE);
1894 if (pci_status & PCI_MSK_IR)
1895 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1896 PCI_MSK_IR);
1899 if (cx23885_boards[dev->board].ci_type == 1 &&
1900 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1901 handled += netup_ci_slot_status(dev, pci_status);
1903 if (cx23885_boards[dev->board].ci_type == 2 &&
1904 (pci_status & PCI_MSK_GPIO0))
1905 handled += altera_ci_irq(dev);
1907 if (ts1_status) {
1908 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1909 handled += cx23885_irq_ts(ts1, ts1_status);
1910 else
1911 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1912 handled += cx23885_irq_417(dev, ts1_status);
1915 if (ts2_status) {
1916 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1917 handled += cx23885_irq_ts(ts2, ts2_status);
1918 else
1919 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1920 handled += cx23885_irq_417(dev, ts2_status);
1923 if (vida_status)
1924 handled += cx23885_video_irq(dev, vida_status);
1926 if (audint_status)
1927 handled += cx23885_audio_irq(dev, audint_status, audint_mask);
1929 if (pci_status & PCI_MSK_IR) {
1930 subdev_handled = false;
1931 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1932 pci_status, &subdev_handled);
1933 if (subdev_handled)
1934 handled++;
1937 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1938 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1939 if (!schedule_work(&dev->cx25840_work))
1940 printk(KERN_ERR "%s: failed to set up deferred work for"
1941 " AV Core/IR interrupt. Interrupt is disabled"
1942 " and won't be re-enabled\n", dev->name);
1943 handled++;
1946 if (handled)
1947 cx_write(PCI_INT_STAT, pci_status);
1948 out:
1949 return IRQ_RETVAL(handled);
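/*
 * v4l2_device notification callback. Sub-devices (for example the
 * integrated IR controller) report events through v4l2_subdev_notify(),
 * possibly from IRQ context, e.g. (illustrative call only, values invented):
 *
 *	v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
 *
 * The handler below only hands the event off to the cx23885-ir helpers,
 * which queue the heavier processing on the ir_rx_work/ir_tx_work items
 * set up in cx23885_v4l2_dev_notify_init().
 */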
1952 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1953 unsigned int notification, void *arg)
1955 struct cx23885_dev *dev;
1957 if (sd == NULL)
1958 return;
1960 dev = to_cx23885(sd->v4l2_dev);
1962 switch (notification) {
1963 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1964 if (sd == dev->sd_ir)
1965 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1966 break;
1967 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1968 if (sd == dev->sd_ir)
1969 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1970 break;
1974 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1976 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1977 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1978 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1979 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1982 static inline int encoder_on_portb(struct cx23885_dev *dev)
1984 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1987 static inline int encoder_on_portc(struct cx23885_dev *dev)
1989 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1992 /* Mask represents 32 different GPIOs. The GPIOs are split across multiple
1993 * registers depending on the board configuration (and whether the
1994 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
1995 * be pushed into the correct hardware register, regardless of its
1996 * physical location. Certain registers are shared, so we sanity check
1997 * and report errors if we think we're tampering with a GPIO that might
1998 * be assigned to the encoder (and used for the host bus).
2000 * GPIO 2 thru 0 - On the cx23885 bridge
2001 * GPIO 18 thru 3 - On the cx23417 host bus interface
2002 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
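/*
 * Illustrative usage sketch (not part of the driver): a hypothetical
 * board-setup path could drive one bridge GPIO and one cx23417 host bus
 * GPIO through the helpers below. The bit positions are invented for the
 * example; only the helper names and the mask layout described above are
 * taken from this file.
 *
 *	cx23885_gpio_enable(dev, (1 << 1) | (1 << 5), 1);  // both as outputs
 *	cx23885_gpio_set(dev, 1 << 1);    // bridge GPIO 1  -> GP0_IO
 *	cx23885_gpio_clear(dev, 1 << 5);  // host bus GPIO 5 -> MC417_RWD
 */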
2004 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
2006 if (mask & 0x7)
2007 cx_set(GP0_IO, mask & 0x7);
2009 if (mask & 0x0007fff8) {
2010 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2011 printk(KERN_ERR
2012 "%s: Setting GPIO on encoder ports\n",
2013 dev->name);
2014 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
2017 /* TODO: 23-19 */
2018 if (mask & 0x00f80000)
2019 printk(KERN_INFO "%s: GPIO 23-19 not yet supported\n", dev->name);
2022 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
2024 if (mask & 0x00000007)
2025 cx_clear(GP0_IO, mask & 0x7);
2027 if (mask & 0x0007fff8) {
2028 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2029 printk(KERN_ERR
2030 "%s: Clearing GPIO moving on encoder ports\n",
2031 dev->name);
2032 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
2035 /* TODO: 23-19 */
2036 if (mask & 0x00f80000)
2037 printk(KERN_INFO "%s: GPIO 23-19 not yet supported\n", dev->name);
2040 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
2042 if (mask & 0x00000007)
2043 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
2045 if (mask & 0x0007fff8) {
2046 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2047 printk(KERN_ERR
2048 "%s: Reading GPIO moving on encoder ports\n",
2049 dev->name);
2050 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
2053 /* TODO: 23-19 */
2054 if (mask & 0x00f80000)
2055 printk(KERN_INFO "%s: GPIO 23-19 not yet supported\n", dev->name);
2057 return 0;
2060 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
2062 if ((mask & 0x00000007) && asoutput)
2063 cx_set(GP0_IO, (mask & 0x7) << 16);
2064 else if ((mask & 0x00000007) && !asoutput)
2065 cx_clear(GP0_IO, (mask & 0x7) << 16);
2067 if (mask & 0x0007fff8) {
2068 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2069 printk(KERN_ERR
2070 "%s: Enabling GPIO on encoder ports\n",
2071 dev->name);
2074 /* MC417_OEN is active low for output, write 1 for an input */
2075 if ((mask & 0x0007fff8) && asoutput)
2076 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2078 else if ((mask & 0x0007fff8) && !asoutput)
2079 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2081 /* TODO: 23-19 */
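/*
 * PCI probe entry point: allocate the device state, register the
 * v4l2_device, enable the PCI function and set up the bridge, verify
 * 32-bit DMA, then hook the shared interrupt handler before unmasking
 * the per-board GPIO and IR interrupt sources (which can fire as soon
 * as they are enabled).
 */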
2084 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
2085 const struct pci_device_id *pci_id)
2087 struct cx23885_dev *dev;
2088 int err;
2090 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2091 if (!dev)
2092 return -ENOMEM;
2094 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2095 if (err < 0)
2096 goto fail_free;
2098 /* Prepare to handle notifications from subdevices */
2099 cx23885_v4l2_dev_notify_init(dev);
2101 /* pci init */
2102 dev->pci = pci_dev;
2103 if (pci_enable_device(pci_dev)) {
2104 err = -EIO;
2105 goto fail_unreg;
2108 if (cx23885_dev_setup(dev) < 0) {
2109 err = -EINVAL;
2110 goto fail_unreg;
2113 /* print pci info */
2114 dev->pci_rev = pci_dev->revision;
2115 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2116 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
2117 "latency: %d, mmio: 0x%llx\n", dev->name,
2118 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2119 dev->pci_lat,
2120 (unsigned long long)pci_resource_start(pci_dev, 0));
2122 pci_set_master(pci_dev);
2123 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
2124 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2125 err = -EIO;
2126 goto fail_irq;
2129 err = request_irq(pci_dev->irq, cx23885_irq,
2130 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
2131 if (err < 0) {
2132 printk(KERN_ERR "%s: can't get IRQ %d\n",
2133 dev->name, pci_dev->irq);
2134 goto fail_irq;
2137 switch (dev->board) {
2138 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2139 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2140 break;
2141 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2142 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2143 break;
2147 * The CX2388[58] IR controller can start firing interrupts when
2148 * enabled, so these have to take place after the cx23885_irq() handler
2149 * is hooked up by the call to request_irq() above.
2151 cx23885_ir_pci_int_enable(dev);
2152 cx23885_input_init(dev);
2154 return 0;
2156 fail_irq:
2157 cx23885_dev_unregister(dev);
2158 fail_unreg:
2159 v4l2_device_unregister(&dev->v4l2_dev);
2160 fail_free:
2161 kfree(dev);
2162 return err;
2165 static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
2167 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2168 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
2170 cx23885_input_fini(dev);
2171 cx23885_ir_fini(dev);
2173 cx23885_shutdown(dev);
2175 pci_disable_device(pci_dev);
2177 /* release the IRQ and unregister/free the device */
2178 free_irq(pci_dev->irq, dev);
2180 cx23885_dev_unregister(dev);
2181 v4l2_device_unregister(v4l2_dev);
2182 kfree(dev);
2185 static struct pci_device_id cx23885_pci_tbl[] = {
2187 /* CX23885 */
2188 .vendor = 0x14f1,
2189 .device = 0x8852,
2190 .subvendor = PCI_ANY_ID,
2191 .subdevice = PCI_ANY_ID,
2192 }, {
2193 /* CX23887 Rev 2 */
2194 .vendor = 0x14f1,
2195 .device = 0x8880,
2196 .subvendor = PCI_ANY_ID,
2197 .subdevice = PCI_ANY_ID,
2198 }, {
2199 /* --- end of list --- */
2202 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2204 static struct pci_driver cx23885_pci_driver = {
2205 .name = "cx23885",
2206 .id_table = cx23885_pci_tbl,
2207 .probe = cx23885_initdev,
2208 .remove = __devexit_p(cx23885_finidev),
2209 /* TODO */
2210 .suspend = NULL,
2211 .resume = NULL,
2214 static int __init cx23885_init(void)
2216 printk(KERN_INFO "cx23885 driver version %s loaded\n",
2217 CX23885_VERSION);
2218 return pci_register_driver(&cx23885_pci_driver);
2221 static void __exit cx23885_fini(void)
2223 pci_unregister_driver(&cx23885_pci_driver);
2226 module_init(cx23885_init);
2227 module_exit(cx23885_fini);