Merge branch 'v6v7' into devel
[linux/fpc-iii.git] / drivers / media / video / cx23885 / cx23885-core.c
blob359882419b7f588b7c698dbcfb6a39ddb1603301
/*
 *  Driver for the Conexant CX23885 PCIe bridge
 *
 *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
33 #include "cx23885.h"
34 #include "cimax2.h"
35 #include "cx23888-ir.h"
36 #include "cx23885-ir.h"
37 #include "cx23885-av.h"
38 #include "cx23885-input.h"
40 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
41 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
42 MODULE_LICENSE("GPL");
44 static unsigned int debug;
45 module_param(debug, int, 0644);
46 MODULE_PARM_DESC(debug, "enable debug messages");
48 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
49 module_param_array(card, int, NULL, 0444);
50 MODULE_PARM_DESC(card, "card type");
52 #define dprintk(level, fmt, arg...)\
53 do { if (debug >= level)\
54 printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
55 } while (0)
57 static unsigned int cx23885_devcount;
59 #define NO_SYNC_LINE (-1U)
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 * CX23887 Assumptions
 * 1 line = 16 bytes of CDT
 * cmds size = 80
 * cdt size = 16 * linesize
 * iqsize = 64
 * maxlines = 6
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */
81 static struct sram_channel cx23885_sram_channels[] = {
82 [SRAM_CH01] = {
83 .name = "VID A",
84 .cmds_start = 0x10000,
85 .ctrl_start = 0x10380,
86 .cdt = 0x104c0,
87 .fifo_start = 0x40,
88 .fifo_size = 0x2800,
89 .ptr1_reg = DMA1_PTR1,
90 .ptr2_reg = DMA1_PTR2,
91 .cnt1_reg = DMA1_CNT1,
92 .cnt2_reg = DMA1_CNT2,
94 [SRAM_CH02] = {
95 .name = "ch2",
96 .cmds_start = 0x0,
97 .ctrl_start = 0x0,
98 .cdt = 0x0,
99 .fifo_start = 0x0,
100 .fifo_size = 0x0,
101 .ptr1_reg = DMA2_PTR1,
102 .ptr2_reg = DMA2_PTR2,
103 .cnt1_reg = DMA2_CNT1,
104 .cnt2_reg = DMA2_CNT2,
106 [SRAM_CH03] = {
107 .name = "TS1 B",
108 .cmds_start = 0x100A0,
109 .ctrl_start = 0x10400,
110 .cdt = 0x10580,
111 .fifo_start = 0x5000,
112 .fifo_size = 0x1000,
113 .ptr1_reg = DMA3_PTR1,
114 .ptr2_reg = DMA3_PTR2,
115 .cnt1_reg = DMA3_CNT1,
116 .cnt2_reg = DMA3_CNT2,
118 [SRAM_CH04] = {
119 .name = "ch4",
120 .cmds_start = 0x0,
121 .ctrl_start = 0x0,
122 .cdt = 0x0,
123 .fifo_start = 0x0,
124 .fifo_size = 0x0,
125 .ptr1_reg = DMA4_PTR1,
126 .ptr2_reg = DMA4_PTR2,
127 .cnt1_reg = DMA4_CNT1,
128 .cnt2_reg = DMA4_CNT2,
130 [SRAM_CH05] = {
131 .name = "ch5",
132 .cmds_start = 0x0,
133 .ctrl_start = 0x0,
134 .cdt = 0x0,
135 .fifo_start = 0x0,
136 .fifo_size = 0x0,
137 .ptr1_reg = DMA5_PTR1,
138 .ptr2_reg = DMA5_PTR2,
139 .cnt1_reg = DMA5_CNT1,
140 .cnt2_reg = DMA5_CNT2,
142 [SRAM_CH06] = {
143 .name = "TS2 C",
144 .cmds_start = 0x10140,
145 .ctrl_start = 0x10440,
146 .cdt = 0x105e0,
147 .fifo_start = 0x6000,
148 .fifo_size = 0x1000,
149 .ptr1_reg = DMA5_PTR1,
150 .ptr2_reg = DMA5_PTR2,
151 .cnt1_reg = DMA5_CNT1,
152 .cnt2_reg = DMA5_CNT2,
154 [SRAM_CH07] = {
155 .name = "ch7",
156 .cmds_start = 0x0,
157 .ctrl_start = 0x0,
158 .cdt = 0x0,
159 .fifo_start = 0x0,
160 .fifo_size = 0x0,
161 .ptr1_reg = DMA6_PTR1,
162 .ptr2_reg = DMA6_PTR2,
163 .cnt1_reg = DMA6_CNT1,
164 .cnt2_reg = DMA6_CNT2,
166 [SRAM_CH08] = {
167 .name = "ch8",
168 .cmds_start = 0x0,
169 .ctrl_start = 0x0,
170 .cdt = 0x0,
171 .fifo_start = 0x0,
172 .fifo_size = 0x0,
173 .ptr1_reg = DMA7_PTR1,
174 .ptr2_reg = DMA7_PTR2,
175 .cnt1_reg = DMA7_CNT1,
176 .cnt2_reg = DMA7_CNT2,
178 [SRAM_CH09] = {
179 .name = "ch9",
180 .cmds_start = 0x0,
181 .ctrl_start = 0x0,
182 .cdt = 0x0,
183 .fifo_start = 0x0,
184 .fifo_size = 0x0,
185 .ptr1_reg = DMA8_PTR1,
186 .ptr2_reg = DMA8_PTR2,
187 .cnt1_reg = DMA8_CNT1,
188 .cnt2_reg = DMA8_CNT2,
192 static struct sram_channel cx23887_sram_channels[] = {
193 [SRAM_CH01] = {
194 .name = "VID A",
195 .cmds_start = 0x10000,
196 .ctrl_start = 0x105b0,
197 .cdt = 0x107b0,
198 .fifo_start = 0x40,
199 .fifo_size = 0x2800,
200 .ptr1_reg = DMA1_PTR1,
201 .ptr2_reg = DMA1_PTR2,
202 .cnt1_reg = DMA1_CNT1,
203 .cnt2_reg = DMA1_CNT2,
205 [SRAM_CH02] = {
206 .name = "ch2",
207 .cmds_start = 0x0,
208 .ctrl_start = 0x0,
209 .cdt = 0x0,
210 .fifo_start = 0x0,
211 .fifo_size = 0x0,
212 .ptr1_reg = DMA2_PTR1,
213 .ptr2_reg = DMA2_PTR2,
214 .cnt1_reg = DMA2_CNT1,
215 .cnt2_reg = DMA2_CNT2,
217 [SRAM_CH03] = {
218 .name = "TS1 B",
219 .cmds_start = 0x100A0,
220 .ctrl_start = 0x10630,
221 .cdt = 0x10870,
222 .fifo_start = 0x5000,
223 .fifo_size = 0x1000,
224 .ptr1_reg = DMA3_PTR1,
225 .ptr2_reg = DMA3_PTR2,
226 .cnt1_reg = DMA3_CNT1,
227 .cnt2_reg = DMA3_CNT2,
229 [SRAM_CH04] = {
230 .name = "ch4",
231 .cmds_start = 0x0,
232 .ctrl_start = 0x0,
233 .cdt = 0x0,
234 .fifo_start = 0x0,
235 .fifo_size = 0x0,
236 .ptr1_reg = DMA4_PTR1,
237 .ptr2_reg = DMA4_PTR2,
238 .cnt1_reg = DMA4_CNT1,
239 .cnt2_reg = DMA4_CNT2,
241 [SRAM_CH05] = {
242 .name = "ch5",
243 .cmds_start = 0x0,
244 .ctrl_start = 0x0,
245 .cdt = 0x0,
246 .fifo_start = 0x0,
247 .fifo_size = 0x0,
248 .ptr1_reg = DMA5_PTR1,
249 .ptr2_reg = DMA5_PTR2,
250 .cnt1_reg = DMA5_CNT1,
251 .cnt2_reg = DMA5_CNT2,
253 [SRAM_CH06] = {
254 .name = "TS2 C",
255 .cmds_start = 0x10140,
256 .ctrl_start = 0x10670,
257 .cdt = 0x108d0,
258 .fifo_start = 0x6000,
259 .fifo_size = 0x1000,
260 .ptr1_reg = DMA5_PTR1,
261 .ptr2_reg = DMA5_PTR2,
262 .cnt1_reg = DMA5_CNT1,
263 .cnt2_reg = DMA5_CNT2,
265 [SRAM_CH07] = {
266 .name = "ch7",
267 .cmds_start = 0x0,
268 .ctrl_start = 0x0,
269 .cdt = 0x0,
270 .fifo_start = 0x0,
271 .fifo_size = 0x0,
272 .ptr1_reg = DMA6_PTR1,
273 .ptr2_reg = DMA6_PTR2,
274 .cnt1_reg = DMA6_CNT1,
275 .cnt2_reg = DMA6_CNT2,
277 [SRAM_CH08] = {
278 .name = "ch8",
279 .cmds_start = 0x0,
280 .ctrl_start = 0x0,
281 .cdt = 0x0,
282 .fifo_start = 0x0,
283 .fifo_size = 0x0,
284 .ptr1_reg = DMA7_PTR1,
285 .ptr2_reg = DMA7_PTR2,
286 .cnt1_reg = DMA7_CNT1,
287 .cnt2_reg = DMA7_CNT2,
289 [SRAM_CH09] = {
290 .name = "ch9",
291 .cmds_start = 0x0,
292 .ctrl_start = 0x0,
293 .cdt = 0x0,
294 .fifo_start = 0x0,
295 .fifo_size = 0x0,
296 .ptr1_reg = DMA8_PTR1,
297 .ptr2_reg = DMA8_PTR2,
298 .cnt1_reg = DMA8_CNT1,
299 .cnt2_reg = DMA8_CNT2,
303 void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
305 unsigned long flags;
306 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
308 dev->pci_irqmask |= mask;
310 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
313 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
315 unsigned long flags;
316 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
318 dev->pci_irqmask |= mask;
319 cx_set(PCI_INT_MSK, mask);
321 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
324 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
326 u32 v;
327 unsigned long flags;
328 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
330 v = mask & dev->pci_irqmask;
331 if (v)
332 cx_set(PCI_INT_MSK, v);
334 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Enable every interrupt currently registered in the software mask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
342 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
344 unsigned long flags;
345 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
347 cx_clear(PCI_INT_MSK, mask);
349 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Disable all PCI interrupts in hardware (software mask unchanged). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
357 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
359 unsigned long flags;
360 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
362 dev->pci_irqmask &= ~mask;
363 cx_clear(PCI_INT_MSK, mask);
365 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
368 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
370 u32 v;
371 unsigned long flags;
372 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
374 v = cx_read(PCI_INT_MSK);
376 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
377 return v;
380 static int cx23885_risc_decode(u32 risc)
382 static char *instr[16] = {
383 [RISC_SYNC >> 28] = "sync",
384 [RISC_WRITE >> 28] = "write",
385 [RISC_WRITEC >> 28] = "writec",
386 [RISC_READ >> 28] = "read",
387 [RISC_READC >> 28] = "readc",
388 [RISC_JUMP >> 28] = "jump",
389 [RISC_SKIP >> 28] = "skip",
390 [RISC_WRITERM >> 28] = "writerm",
391 [RISC_WRITECM >> 28] = "writecm",
392 [RISC_WRITECR >> 28] = "writecr",
394 static int incr[16] = {
395 [RISC_WRITE >> 28] = 3,
396 [RISC_JUMP >> 28] = 3,
397 [RISC_SKIP >> 28] = 1,
398 [RISC_SYNC >> 28] = 1,
399 [RISC_WRITERM >> 28] = 3,
400 [RISC_WRITECM >> 28] = 3,
401 [RISC_WRITECR >> 28] = 4,
403 static char *bits[] = {
404 "12", "13", "14", "resync",
405 "cnt0", "cnt1", "18", "19",
406 "20", "21", "22", "23",
407 "irq1", "irq2", "eol", "sol",
409 int i;
411 printk("0x%08x [ %s", risc,
412 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
413 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
414 if (risc & (1 << (i + 12)))
415 printk(" %s", bits[i]);
416 printk(" count=%d ]\n", risc & 0xfff);
417 return incr[risc >> 28] ? incr[risc >> 28] : 1;
420 void cx23885_wakeup(struct cx23885_tsport *port,
421 struct cx23885_dmaqueue *q, u32 count)
423 struct cx23885_dev *dev = port->dev;
424 struct cx23885_buffer *buf;
425 int bc;
427 for (bc = 0;; bc++) {
428 if (list_empty(&q->active))
429 break;
430 buf = list_entry(q->active.next,
431 struct cx23885_buffer, vb.queue);
433 /* count comes from the hw and is is 16bit wide --
434 * this trick handles wrap-arounds correctly for
435 * up to 32767 buffers in flight... */
436 if ((s16) (count - buf->count) < 0)
437 break;
439 do_gettimeofday(&buf->vb.ts);
440 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
441 count, buf->count);
442 buf->vb.state = VIDEOBUF_DONE;
443 list_del(&buf->vb.queue);
444 wake_up(&buf->vb.done);
446 if (list_empty(&q->active))
447 del_timer(&q->timeout);
448 else
449 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
450 if (bc != 1)
451 printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
452 __func__, bc);
455 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
456 struct sram_channel *ch,
457 unsigned int bpl, u32 risc)
459 unsigned int i, lines;
460 u32 cdt;
462 if (ch->cmds_start == 0) {
463 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
464 ch->name);
465 cx_write(ch->ptr1_reg, 0);
466 cx_write(ch->ptr2_reg, 0);
467 cx_write(ch->cnt2_reg, 0);
468 cx_write(ch->cnt1_reg, 0);
469 return 0;
470 } else {
471 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
472 ch->name);
475 bpl = (bpl + 7) & ~7; /* alignment */
476 cdt = ch->cdt;
477 lines = ch->fifo_size / bpl;
478 if (lines > 6)
479 lines = 6;
480 BUG_ON(lines < 2);
482 cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
483 cx_write(8 + 4, 8);
484 cx_write(8 + 8, 0);
486 /* write CDT */
487 for (i = 0; i < lines; i++) {
488 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
489 ch->fifo_start + bpl*i);
490 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
491 cx_write(cdt + 16*i + 4, 0);
492 cx_write(cdt + 16*i + 8, 0);
493 cx_write(cdt + 16*i + 12, 0);
496 /* write CMDS */
497 if (ch->jumponly)
498 cx_write(ch->cmds_start + 0, 8);
499 else
500 cx_write(ch->cmds_start + 0, risc);
501 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
502 cx_write(ch->cmds_start + 8, cdt);
503 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
504 cx_write(ch->cmds_start + 16, ch->ctrl_start);
505 if (ch->jumponly)
506 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
507 else
508 cx_write(ch->cmds_start + 20, 64 >> 2);
509 for (i = 24; i < 80; i += 4)
510 cx_write(ch->cmds_start + i, 0);
512 /* fill registers */
513 cx_write(ch->ptr1_reg, ch->fifo_start);
514 cx_write(ch->ptr2_reg, cdt);
515 cx_write(ch->cnt2_reg, (lines*16) >> 3);
516 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
518 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
519 dev->bridge,
520 ch->name,
521 bpl,
522 lines);
524 return 0;
527 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
528 struct sram_channel *ch)
530 static char *name[] = {
531 "init risc lo",
532 "init risc hi",
533 "cdt base",
534 "cdt size",
535 "iq base",
536 "iq size",
537 "risc pc lo",
538 "risc pc hi",
539 "iq wr ptr",
540 "iq rd ptr",
541 "cdt current",
542 "pci target lo",
543 "pci target hi",
544 "line / byte",
546 u32 risc;
547 unsigned int i, j, n;
549 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
550 dev->name, ch->name);
551 for (i = 0; i < ARRAY_SIZE(name); i++)
552 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
553 dev->name, name[i],
554 cx_read(ch->cmds_start + 4*i));
556 for (i = 0; i < 4; i++) {
557 risc = cx_read(ch->cmds_start + 4 * (i + 14));
558 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
559 cx23885_risc_decode(risc);
561 for (i = 0; i < (64 >> 2); i += n) {
562 risc = cx_read(ch->ctrl_start + 4 * i);
563 /* No consideration for bits 63-32 */
565 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
566 ch->ctrl_start + 4 * i, i);
567 n = cx23885_risc_decode(risc);
568 for (j = 1; j < n; j++) {
569 risc = cx_read(ch->ctrl_start + 4 * (i + j));
570 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
571 dev->name, i+j, risc, j);
575 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
576 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
577 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
578 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
579 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
580 dev->name, cx_read(ch->ptr1_reg));
581 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
582 dev->name, cx_read(ch->ptr2_reg));
583 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
584 dev->name, cx_read(ch->cnt1_reg));
585 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
586 dev->name, cx_read(ch->cnt2_reg));
589 static void cx23885_risc_disasm(struct cx23885_tsport *port,
590 struct btcx_riscmem *risc)
592 struct cx23885_dev *dev = port->dev;
593 unsigned int i, j, n;
595 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
596 dev->name, risc->cpu, (unsigned long)risc->dma);
597 for (i = 0; i < (risc->size >> 2); i += n) {
598 printk(KERN_INFO "%s: %04d: ", dev->name, i);
599 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
600 for (j = 1; j < n; j++)
601 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
602 dev->name, i + j, risc->cpu[i + j], j);
603 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
604 break;
608 static void cx23885_shutdown(struct cx23885_dev *dev)
610 /* disable RISC controller */
611 cx_write(DEV_CNTRL2, 0);
613 /* Disable all IR activity */
614 cx_write(IR_CNTRL_REG, 0);
616 /* Disable Video A/B activity */
617 cx_write(VID_A_DMA_CTL, 0);
618 cx_write(VID_B_DMA_CTL, 0);
619 cx_write(VID_C_DMA_CTL, 0);
621 /* Disable Audio activity */
622 cx_write(AUD_INT_DMA_CTL, 0);
623 cx_write(AUD_EXT_DMA_CTL, 0);
625 /* Disable Serial port */
626 cx_write(UART_CTL, 0);
628 /* Disable Interrupts */
629 cx23885_irq_disable_all(dev);
630 cx_write(VID_A_INT_MSK, 0);
631 cx_write(VID_B_INT_MSK, 0);
632 cx_write(VID_C_INT_MSK, 0);
633 cx_write(AUDIO_INT_INT_MSK, 0);
634 cx_write(AUDIO_EXT_INT_MSK, 0);
638 static void cx23885_reset(struct cx23885_dev *dev)
640 dprintk(1, "%s()\n", __func__);
642 cx23885_shutdown(dev);
644 cx_write(PCI_INT_STAT, 0xffffffff);
645 cx_write(VID_A_INT_STAT, 0xffffffff);
646 cx_write(VID_B_INT_STAT, 0xffffffff);
647 cx_write(VID_C_INT_STAT, 0xffffffff);
648 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
649 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
650 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
651 cx_write(PAD_CTRL, 0x00500300);
653 mdelay(100);
655 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
656 720*4, 0);
657 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
658 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
659 188*4, 0);
660 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
661 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
662 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
663 188*4, 0);
664 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
665 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
666 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
668 cx23885_gpio_setup(dev);
672 static int cx23885_pci_quirks(struct cx23885_dev *dev)
674 dprintk(1, "%s()\n", __func__);
676 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
677 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
678 * occur on the cx23887 bridge.
680 if (dev->bridge == CX23885_BRIDGE_885)
681 cx_clear(RDR_TLCTL0, 1 << 4);
683 return 0;
686 static int get_resources(struct cx23885_dev *dev)
688 if (request_mem_region(pci_resource_start(dev->pci, 0),
689 pci_resource_len(dev->pci, 0),
690 dev->name))
691 return 0;
693 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
694 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
696 return -EBUSY;
699 static void cx23885_timeout(unsigned long data);
700 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
701 u32 reg, u32 mask, u32 value);
703 static int cx23885_init_tsport(struct cx23885_dev *dev,
704 struct cx23885_tsport *port, int portno)
706 dprintk(1, "%s(portno=%d)\n", __func__, portno);
708 /* Transport bus init dma queue - Common settings */
709 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
710 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
711 port->vld_misc_val = 0x0;
712 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
714 spin_lock_init(&port->slock);
715 port->dev = dev;
716 port->nr = portno;
718 INIT_LIST_HEAD(&port->mpegq.active);
719 INIT_LIST_HEAD(&port->mpegq.queued);
720 port->mpegq.timeout.function = cx23885_timeout;
721 port->mpegq.timeout.data = (unsigned long)port;
722 init_timer(&port->mpegq.timeout);
724 mutex_init(&port->frontends.lock);
725 INIT_LIST_HEAD(&port->frontends.felist);
726 port->frontends.active_fe_id = 0;
728 /* This should be hardcoded allow a single frontend
729 * attachment to this tsport, keeping the -dvb.c
730 * code clean and safe.
732 if (!port->num_frontends)
733 port->num_frontends = 1;
735 switch (portno) {
736 case 1:
737 port->reg_gpcnt = VID_B_GPCNT;
738 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
739 port->reg_dma_ctl = VID_B_DMA_CTL;
740 port->reg_lngth = VID_B_LNGTH;
741 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
742 port->reg_gen_ctrl = VID_B_GEN_CTL;
743 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
744 port->reg_sop_status = VID_B_SOP_STATUS;
745 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
746 port->reg_vld_misc = VID_B_VLD_MISC;
747 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
748 port->reg_src_sel = VID_B_SRC_SEL;
749 port->reg_ts_int_msk = VID_B_INT_MSK;
750 port->reg_ts_int_stat = VID_B_INT_STAT;
751 port->sram_chno = SRAM_CH03; /* VID_B */
752 port->pci_irqmask = 0x02; /* VID_B bit1 */
753 break;
754 case 2:
755 port->reg_gpcnt = VID_C_GPCNT;
756 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
757 port->reg_dma_ctl = VID_C_DMA_CTL;
758 port->reg_lngth = VID_C_LNGTH;
759 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
760 port->reg_gen_ctrl = VID_C_GEN_CTL;
761 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
762 port->reg_sop_status = VID_C_SOP_STATUS;
763 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
764 port->reg_vld_misc = VID_C_VLD_MISC;
765 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
766 port->reg_src_sel = 0;
767 port->reg_ts_int_msk = VID_C_INT_MSK;
768 port->reg_ts_int_stat = VID_C_INT_STAT;
769 port->sram_chno = SRAM_CH06; /* VID_C */
770 port->pci_irqmask = 0x04; /* VID_C bit2 */
771 break;
772 default:
773 BUG();
776 cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
777 port->reg_dma_ctl, port->dma_ctl_val, 0x00);
779 return 0;
782 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
784 switch (cx_read(RDR_CFG2) & 0xff) {
785 case 0x00:
786 /* cx23885 */
787 dev->hwrevision = 0xa0;
788 break;
789 case 0x01:
790 /* CX23885-12Z */
791 dev->hwrevision = 0xa1;
792 break;
793 case 0x02:
794 /* CX23885-13Z/14Z */
795 dev->hwrevision = 0xb0;
796 break;
797 case 0x03:
798 if (dev->pci->device == 0x8880) {
799 /* CX23888-21Z/22Z */
800 dev->hwrevision = 0xc0;
801 } else {
802 /* CX23885-14Z */
803 dev->hwrevision = 0xa4;
805 break;
806 case 0x04:
807 if (dev->pci->device == 0x8880) {
808 /* CX23888-31Z */
809 dev->hwrevision = 0xd0;
810 } else {
811 /* CX23885-15Z, CX23888-31Z */
812 dev->hwrevision = 0xa5;
814 break;
815 case 0x0e:
816 /* CX23887-15Z */
817 dev->hwrevision = 0xc0;
818 break;
819 case 0x0f:
820 /* CX23887-14Z */
821 dev->hwrevision = 0xb1;
822 break;
823 default:
824 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
825 __func__, dev->hwrevision);
827 if (dev->hwrevision)
828 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
829 __func__, dev->hwrevision);
830 else
831 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
832 __func__, dev->hwrevision);
835 /* Find the first v4l2_subdev member of the group id in hw */
836 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
838 struct v4l2_subdev *result = NULL;
839 struct v4l2_subdev *sd;
841 spin_lock(&dev->v4l2_dev.lock);
842 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
843 if (sd->grp_id == hw) {
844 result = sd;
845 break;
848 spin_unlock(&dev->v4l2_dev.lock);
849 return result;
852 static int cx23885_dev_setup(struct cx23885_dev *dev)
854 int i;
856 spin_lock_init(&dev->pci_irqmask_lock);
858 mutex_init(&dev->lock);
859 mutex_init(&dev->gpio_lock);
861 atomic_inc(&dev->refcount);
863 dev->nr = cx23885_devcount++;
864 sprintf(dev->name, "cx23885[%d]", dev->nr);
866 /* Configure the internal memory */
867 if (dev->pci->device == 0x8880) {
868 /* Could be 887 or 888, assume a default */
869 dev->bridge = CX23885_BRIDGE_887;
870 /* Apply a sensible clock frequency for the PCIe bridge */
871 dev->clk_freq = 25000000;
872 dev->sram_channels = cx23887_sram_channels;
873 } else
874 if (dev->pci->device == 0x8852) {
875 dev->bridge = CX23885_BRIDGE_885;
876 /* Apply a sensible clock frequency for the PCIe bridge */
877 dev->clk_freq = 28000000;
878 dev->sram_channels = cx23885_sram_channels;
879 } else
880 BUG();
882 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
883 __func__, dev->bridge);
885 /* board config */
886 dev->board = UNSET;
887 if (card[dev->nr] < cx23885_bcount)
888 dev->board = card[dev->nr];
889 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
890 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
891 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
892 dev->board = cx23885_subids[i].card;
893 if (UNSET == dev->board) {
894 dev->board = CX23885_BOARD_UNKNOWN;
895 cx23885_card_list(dev);
898 /* If the user specific a clk freq override, apply it */
899 if (cx23885_boards[dev->board].clk_freq > 0)
900 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
902 dev->pci_bus = dev->pci->bus->number;
903 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
904 cx23885_irq_add(dev, 0x001f00);
905 if (cx23885_boards[dev->board].cimax > 0)
906 cx23885_irq_add(dev, 0x01800000); /* for CiMaxes */
908 /* External Master 1 Bus */
909 dev->i2c_bus[0].nr = 0;
910 dev->i2c_bus[0].dev = dev;
911 dev->i2c_bus[0].reg_stat = I2C1_STAT;
912 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
913 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
914 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
915 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
916 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
918 /* External Master 2 Bus */
919 dev->i2c_bus[1].nr = 1;
920 dev->i2c_bus[1].dev = dev;
921 dev->i2c_bus[1].reg_stat = I2C2_STAT;
922 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
923 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
924 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
925 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
926 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
928 /* Internal Master 3 Bus */
929 dev->i2c_bus[2].nr = 2;
930 dev->i2c_bus[2].dev = dev;
931 dev->i2c_bus[2].reg_stat = I2C3_STAT;
932 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
933 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
934 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
935 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
936 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
938 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
939 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
940 cx23885_init_tsport(dev, &dev->ts1, 1);
942 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
943 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
944 cx23885_init_tsport(dev, &dev->ts2, 2);
946 if (get_resources(dev) < 0) {
947 printk(KERN_ERR "CORE %s No more PCIe resources for "
948 "subsystem: %04x:%04x\n",
949 dev->name, dev->pci->subsystem_vendor,
950 dev->pci->subsystem_device);
952 cx23885_devcount--;
953 return -ENODEV;
956 /* PCIe stuff */
957 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
958 pci_resource_len(dev->pci, 0));
960 dev->bmmio = (u8 __iomem *)dev->lmmio;
962 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
963 dev->name, dev->pci->subsystem_vendor,
964 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
965 dev->board, card[dev->nr] == dev->board ?
966 "insmod option" : "autodetected");
968 cx23885_pci_quirks(dev);
970 /* Assume some sensible defaults */
971 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
972 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
973 dev->radio_type = cx23885_boards[dev->board].radio_type;
974 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
976 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x\n",
977 __func__, dev->tuner_type, dev->tuner_addr);
978 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
979 __func__, dev->radio_type, dev->radio_addr);
981 /* The cx23417 encoder has GPIO's that need to be initialised
982 * before DVB, so that demodulators and tuners are out of
983 * reset before DVB uses them.
985 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
986 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
987 cx23885_mc417_init(dev);
989 /* init hardware */
990 cx23885_reset(dev);
992 cx23885_i2c_register(&dev->i2c_bus[0]);
993 cx23885_i2c_register(&dev->i2c_bus[1]);
994 cx23885_i2c_register(&dev->i2c_bus[2]);
995 cx23885_card_setup(dev);
996 call_all(dev, core, s_power, 0);
997 cx23885_ir_init(dev);
999 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1000 if (cx23885_video_register(dev) < 0) {
1001 printk(KERN_ERR "%s() Failed to register analog "
1002 "video adapters on VID_A\n", __func__);
1006 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1007 if (cx23885_dvb_register(&dev->ts1) < 0) {
1008 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
1009 __func__);
1011 } else
1012 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1013 if (cx23885_417_register(dev) < 0) {
1014 printk(KERN_ERR
1015 "%s() Failed to register 417 on VID_B\n",
1016 __func__);
1020 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1021 if (cx23885_dvb_register(&dev->ts2) < 0) {
1022 printk(KERN_ERR
1023 "%s() Failed to register dvb on VID_C\n",
1024 __func__);
1026 } else
1027 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1028 if (cx23885_417_register(dev) < 0) {
1029 printk(KERN_ERR
1030 "%s() Failed to register 417 on VID_C\n",
1031 __func__);
1035 cx23885_dev_checkrevision(dev);
1037 return 0;
1040 static void cx23885_dev_unregister(struct cx23885_dev *dev)
1042 release_mem_region(pci_resource_start(dev->pci, 0),
1043 pci_resource_len(dev->pci, 0));
1045 if (!atomic_dec_and_test(&dev->refcount))
1046 return;
1048 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1049 cx23885_video_unregister(dev);
1051 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1052 cx23885_dvb_unregister(&dev->ts1);
1054 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1055 cx23885_417_unregister(dev);
1057 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1058 cx23885_dvb_unregister(&dev->ts2);
1060 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1061 cx23885_417_unregister(dev);
1063 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1064 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1065 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1067 iounmap(dev->lmmio);
1070 static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1071 unsigned int offset, u32 sync_line,
1072 unsigned int bpl, unsigned int padding,
1073 unsigned int lines)
1075 struct scatterlist *sg;
1076 unsigned int line, todo;
1078 /* sync instruction */
1079 if (sync_line != NO_SYNC_LINE)
1080 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1082 /* scan lines */
1083 sg = sglist;
1084 for (line = 0; line < lines; line++) {
1085 while (offset && offset >= sg_dma_len(sg)) {
1086 offset -= sg_dma_len(sg);
1087 sg++;
1089 if (bpl <= sg_dma_len(sg)-offset) {
1090 /* fits into current chunk */
1091 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
1092 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1093 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1094 offset += bpl;
1095 } else {
1096 /* scanline needs to be split */
1097 todo = bpl;
1098 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
1099 (sg_dma_len(sg)-offset));
1100 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1101 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1102 todo -= (sg_dma_len(sg)-offset);
1103 offset = 0;
1104 sg++;
1105 while (todo > sg_dma_len(sg)) {
1106 *(rp++) = cpu_to_le32(RISC_WRITE|
1107 sg_dma_len(sg));
1108 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1109 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1110 todo -= sg_dma_len(sg);
1111 sg++;
1113 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1114 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1115 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1116 offset += todo;
1118 offset += padding;
1121 return rp;
1124 int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
1125 struct scatterlist *sglist, unsigned int top_offset,
1126 unsigned int bottom_offset, unsigned int bpl,
1127 unsigned int padding, unsigned int lines)
1129 u32 instructions, fields;
1130 __le32 *rp;
1131 int rc;
1133 fields = 0;
1134 if (UNSET != top_offset)
1135 fields++;
1136 if (UNSET != bottom_offset)
1137 fields++;
1139 /* estimate risc mem: worst case is one write per page border +
1140 one write per scan line + syncs + jump (all 2 dwords). Padding
1141 can cause next bpl to start close to a page border. First DMA
1142 region may be smaller than PAGE_SIZE */
1143 /* write and jump need and extra dword */
1144 instructions = fields * (1 + ((bpl + padding) * lines)
1145 / PAGE_SIZE + lines);
1146 instructions += 2;
1147 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1148 if (rc < 0)
1149 return rc;
1151 /* write risc instructions */
1152 rp = risc->cpu;
1153 if (UNSET != top_offset)
1154 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1155 bpl, padding, lines);
1156 if (UNSET != bottom_offset)
1157 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1158 bpl, padding, lines);
1160 /* save pointer to jmp instruction address */
1161 risc->jmp = rp;
1162 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1163 return 0;
1166 static int cx23885_risc_databuffer(struct pci_dev *pci,
1167 struct btcx_riscmem *risc,
1168 struct scatterlist *sglist,
1169 unsigned int bpl,
1170 unsigned int lines)
1172 u32 instructions;
1173 __le32 *rp;
1174 int rc;
1176 /* estimate risc mem: worst case is one write per page border +
1177 one write per scan line + syncs + jump (all 2 dwords). Here
1178 there is no padding and no sync. First DMA region may be smaller
1179 than PAGE_SIZE */
1180 /* Jump and write need an extra dword */
1181 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1182 instructions += 1;
1184 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1185 if (rc < 0)
1186 return rc;
1188 /* write risc instructions */
1189 rp = risc->cpu;
1190 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);
1192 /* save pointer to jmp instruction address */
1193 risc->jmp = rp;
1194 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1195 return 0;
1198 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
1199 u32 reg, u32 mask, u32 value)
1201 __le32 *rp;
1202 int rc;
1204 rc = btcx_riscmem_alloc(pci, risc, 4*16);
1205 if (rc < 0)
1206 return rc;
1208 /* write risc instructions */
1209 rp = risc->cpu;
1210 *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
1211 *(rp++) = cpu_to_le32(reg);
1212 *(rp++) = cpu_to_le32(value);
1213 *(rp++) = cpu_to_le32(mask);
1214 *(rp++) = cpu_to_le32(RISC_JUMP);
1215 *(rp++) = cpu_to_le32(risc->dma);
1216 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1217 return 0;
/*
 * Release all resources held by a capture buffer: wait for any DMA in
 * flight to complete, tear down the scatter-gather mapping, free the
 * videobuf DMA pages and the RISC program, then mark the buffer as
 * needing re-initialisation.
 *
 * Must not be called from interrupt context (videobuf_waiton() sleeps;
 * enforced by the BUG_ON below).
 */
void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);

	BUG_ON(in_interrupt());
	videobuf_waiton(q, &buf->vb, 0, 0);
	videobuf_dma_unmap(q->dev, dma);
	videobuf_dma_free(dma);
	btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
/*
 * Dump the interrupt-mask, DMA-control and per-port TS registers for
 * one transport stream port at debug level 1.  Purely diagnostic:
 * no side effects beyond the register reads.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	/* Per-port registers; their addresses differ per TS port, so the
	 * register offset is printed alongside the value. */
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1284 static int cx23885_start_dma(struct cx23885_tsport *port,
1285 struct cx23885_dmaqueue *q,
1286 struct cx23885_buffer *buf)
1288 struct cx23885_dev *dev = port->dev;
1289 u32 reg;
1291 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1292 buf->vb.width, buf->vb.height, buf->vb.field);
1294 /* Stop the fifo and risc engine for this port */
1295 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1297 /* setup fifo + format */
1298 cx23885_sram_channel_setup(dev,
1299 &dev->sram_channels[port->sram_chno],
1300 port->ts_packet_size, buf->risc.dma);
1301 if (debug > 5) {
1302 cx23885_sram_channel_dump(dev,
1303 &dev->sram_channels[port->sram_chno]);
1304 cx23885_risc_disasm(port, &buf->risc);
1307 /* write TS length to chip */
1308 cx_write(port->reg_lngth, buf->vb.width);
1310 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1311 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1312 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1313 __func__,
1314 cx23885_boards[dev->board].portb,
1315 cx23885_boards[dev->board].portc);
1316 return -EINVAL;
1319 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1320 cx23885_av_clk(dev, 0);
1322 udelay(100);
1324 /* If the port supports SRC SELECT, configure it */
1325 if (port->reg_src_sel)
1326 cx_write(port->reg_src_sel, port->src_sel_val);
1328 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1329 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1330 cx_write(port->reg_vld_misc, port->vld_misc_val);
1331 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1332 udelay(100);
1334 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1335 /* reset counter to zero */
1336 cx_write(port->reg_gpcnt_ctl, 3);
1337 q->count = 1;
1339 /* Set VIDB pins to input */
1340 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1341 reg = cx_read(PAD_CTRL);
1342 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1343 cx_write(PAD_CTRL, reg);
1346 /* Set VIDC pins to input */
1347 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1348 reg = cx_read(PAD_CTRL);
1349 reg &= ~0x4; /* Clear TS2_SOP_OE */
1350 cx_write(PAD_CTRL, reg);
1353 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1355 reg = cx_read(PAD_CTRL);
1356 reg = reg & ~0x1; /* Clear TS1_OE */
1358 /* FIXME, bit 2 writing here is questionable */
1359 /* set TS1_SOP_OE and TS1_OE_HI */
1360 reg = reg | 0xa;
1361 cx_write(PAD_CTRL, reg);
1363 /* FIXME and these two registers should be documented. */
1364 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1365 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1368 switch (dev->bridge) {
1369 case CX23885_BRIDGE_885:
1370 case CX23885_BRIDGE_887:
1371 case CX23885_BRIDGE_888:
1372 /* enable irqs */
1373 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1374 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1375 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1376 cx23885_irq_add(dev, port->pci_irqmask);
1377 cx23885_irq_enable_all(dev);
1378 break;
1379 default:
1380 BUG();
1383 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1385 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1386 cx23885_av_clk(dev, 1);
1388 if (debug > 4)
1389 cx23885_tsport_reg_dump(port);
1391 return 0;
/*
 * Quiesce a TS port: mask its interrupts and stop its DMA engine.
 * When the CX23417 encoder is wired to port B, also flip the TS1 pad
 * back to output direction (set TS1_OE, clear TS1_SOP_OE/TS1_OE_HI),
 * zero the source-select register, write 8 to the general control
 * register (meaning undocumented here — mirrors the start sequence),
 * and gate off the A/V core clock.  Always returns 0.
 */
static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);

		/* Set TS1_OE */
		reg = reg | 0x1;

		/* clear TS1_SOP_OE and TS1_OE_HI */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}
/*
 * (Re)start DMA on a transport port after an error, timeout or stall.
 *
 * If the active list is empty, buffers are migrated from the queued
 * list: the first migrated buffer restarts DMA; each subsequent buffer
 * with identical width/height/format is chained onto its predecessor
 * by patching that predecessor's RISC jump target.  A buffer with a
 * different geometry ends the migration.  If the active list is not
 * empty, DMA simply restarts at its head and all active buffers are
 * renumbered from q->count.
 *
 * Always returns 0.  Caller must hold port->slock (see the ISR and
 * do_cancel_buffers() call sites).
 */
int cx23885_restart_queue(struct cx23885_tsport *port,
			  struct cx23885_dmaqueue *q)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;

	dprintk(5, "%s()\n", __func__);
	if (list_empty(&q->active)) {
		struct cx23885_buffer *prev;
		prev = NULL;

		dprintk(5, "%s() queue is empty\n", __func__);

		for (;;) {
			if (list_empty(&q->queued))
				return 0;
			buf = list_entry(q->queued.next, struct cx23885_buffer,
					 vb.queue);
			if (NULL == prev) {
				/* First buffer: move to active and kick DMA */
				list_del(&buf->vb.queue);
				list_add_tail(&buf->vb.queue, &q->active);
				cx23885_start_dma(port, q, buf);
				buf->vb.state = VIDEOBUF_ACTIVE;
				buf->count = q->count++;
				mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
				dprintk(5, "[%p/%d] restart_queue - f/active\n",
					buf, buf->vb.i);
			} else if (prev->vb.width == buf->vb.width &&
				   prev->vb.height == buf->vb.height &&
				   prev->fmt == buf->fmt) {
				/* Same geometry: chain onto the previous
				 * buffer's RISC program via its jump slot */
				list_del(&buf->vb.queue);
				list_add_tail(&buf->vb.queue, &q->active);
				buf->vb.state = VIDEOBUF_ACTIVE;
				buf->count = q->count++;
				prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
				/* 64 bit bits 63-32 */
				prev->risc.jmp[2] = cpu_to_le32(0);
				dprintk(5, "[%p/%d] restart_queue - m/active\n",
					buf, buf->vb.i);
			} else {
				/* Geometry changed: stop migrating */
				return 0;
			}
			prev = buf;
		}
		return 0;
	}

	/* Active buffers exist: restart DMA at the head and renumber */
	buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
	dprintk(2, "restart_queue [%p/%d]: restart dma\n",
		buf, buf->vb.i);
	cx23885_start_dma(port, q, buf);
	list_for_each_entry(buf, &q->active, vb.queue)
		buf->count = q->count++;
	mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
	return 0;
}
1484 /* ------------------------------------------------------------------ */
/*
 * videobuf buf_prepare callback for TS capture.
 *
 * Rejects user-pointer buffers smaller than ts_packet_size *
 * ts_packet_count.  On first use (VIDEOBUF_NEEDS_INIT) it sets the
 * buffer geometry — one "scanline" per TS packet — locks the pages
 * and builds the RISC data-buffer DMA program over them.
 *
 * Returns 0 and marks the buffer VIDEOBUF_PREPARED, or a negative
 * errno; on iolock failure the buffer is freed and reset.
 */
int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
			struct cx23885_buffer *buf, enum v4l2_field field)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	int rc;

	dprintk(1, "%s: %p\n", __func__, buf);
	if (0 != buf->vb.baddr && buf->vb.bsize < size)
		return -EINVAL;

	if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
		/* width = bytes per TS packet, height = packets per buffer */
		buf->vb.width = port->ts_packet_size;
		buf->vb.height = port->ts_packet_count;
		buf->vb.size = size;
		buf->vb.field = field /*V4L2_FIELD_TOP*/;

		rc = videobuf_iolock(q, &buf->vb, NULL);
		if (0 != rc)
			goto fail;
		cx23885_risc_databuffer(dev->pci, &buf->risc,
					videobuf_to_dma(&buf->vb)->sglist,
					buf->vb.width, buf->vb.height);
	}
	buf->vb.state = VIDEOBUF_PREPARED;
	return 0;

 fail:
	cx23885_free_buffer(q, buf);
	return rc;
}
/*
 * videobuf buf_queue callback: append a prepared buffer to the active
 * DMA chain.
 *
 * Every buffer's RISC program is first terminated with a jump to the
 * queue's stopper program (raising IRQ1 and incrementing the packet
 * counter).  If the active list is empty, DMA is started on this
 * buffer; otherwise the current tail buffer's jump target is patched
 * to point at this buffer's program, extending the chain without
 * stopping DMA.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;

	/* add jump to stopper */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&cx88q->active)) {
		dprintk(1, "queue is empty - first active\n");
		list_add_tail(&buf->vb.queue, &cx88q->active);
		cx23885_start_dma(port, cx88q, buf);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = cx88q->count++;
		mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.i, __func__);
	} else {
		dprintk(1, "queue is not empty - append to active\n");
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  vb.queue);
		list_add_tail(&buf->vb.queue, &cx88q->active);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = cx88q->count++;
		/* re-point the old tail's jump at this buffer's program */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.i, __func__);
	}
}
1552 /* ----------------------------------------------------------- */
/*
 * Fail every buffer on the active queue: each is removed, marked
 * VIDEOBUF_ERROR and its waiters woken, with 'reason' logged at debug
 * level 1.  When 'restart' is set, DMA is restarted from the queued
 * list afterwards.  Takes port->slock itself — callers must NOT hold
 * it.
 */
static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
			      int restart)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 vb.queue);
		list_del(&buf->vb.queue);
		buf->vb.state = VIDEOBUF_ERROR;
		wake_up(&buf->vb.done);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
	}
	if (restart) {
		dprintk(1, "restarting queue\n");
		cx23885_restart_queue(port, q);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}
/*
 * Full cancellation path for a TS port: disarm the DMA-timeout timer,
 * stop the port's interrupts/DMA, then fail all active buffers without
 * restarting the queue.  May sleep (del_timer_sync).
 */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;

	dprintk(1, "%s()\n", __func__);
	del_timer_sync(&q->timeout);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel", 0);
}
/*
 * DMA-timeout timer callback: the hardware failed to complete a buffer
 * within BUFFER_TIMEOUT.  Optionally dump SRAM channel state (debug >
 * 5), stop DMA on the port, fail the stalled buffers and attempt a
 * restart from the queued list.  'data' is the cx23885_tsport pointer
 * installed when the timer was set up.
 */
static void cx23885_timeout(unsigned long data)
{
	struct cx23885_tsport *port = (struct cx23885_tsport *)data;
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);

	if (debug > 5)
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	cx23885_stop_dma(port);
	do_cancel_buffers(port, "timeout", 1);
}
/*
 * Interrupt service for a TS port feeding the CX23417 MPEG encoder.
 * FIXME in original: hard-codes port 1 (ts1) even when called for ts2.
 *
 * Any error condition (bad packet, opcode error, sync loss, overflow)
 * stops the port's DMA, dumps the SRAM channel and re-checks the
 * encoder.  RISCI1 signals buffer completion (wake up consumers);
 * RISCI2 means the stopper ran — restart the queue.  The status
 * register is acked when anything was pending.
 *
 * Returns non-zero when the interrupt was handled.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
	    (status & VID_B_MSK_OPC_ERR) ||
	    (status & VID_B_MSK_VBI_OPC_ERR) ||
	    (status & VID_B_MSK_SYNC) ||
	    (status & VID_B_MSK_VBI_SYNC) ||
	    (status & VID_B_MSK_OF) ||
	    (status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, " VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, " VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, " VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, " VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, " VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, " VID_B_MSK_VBI_OF\n");

		/* Stop DMA and inspect state on any error */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, " VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	} else if (status & VID_B_MSK_RISCI2) {
		dprintk(7, " VID_B_MSK_RISCI2\n");
		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* ack everything we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
/*
 * Interrupt service for a DVB transport stream port.
 *
 * Errors (opcode error, bad packet, sync loss, overflow) stop the
 * port's DMA and dump the SRAM channel for diagnosis.  RISCI1 is
 * buffer completion: wake up waiters with the current packet count.
 * RISCI2 means the stopper program ran: restart the queue.  Any
 * pending status is acked.  Returns non-zero when handled.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
	    (status & VID_BC_MSK_BAD_PKT) ||
	    (status & VID_BC_MSK_SYNC) ||
	    (status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		/* Stop DMA and inspect state on any error */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	} else if (status & VID_BC_MSK_RISCI2) {

		dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);

		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* ack everything we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
/*
 * Top-level PCIe bridge interrupt handler.
 *
 * Reads the PCI, VID A (raw video), VID B (ts1) and VID C (ts2)
 * status/mask registers, bails out fast when nothing is pending, then
 * dispatches: CiMax slot status (NetUP boards) on GPIO0/1, per-port TS
 * handlers (DVB vs. 417 encoder depending on board config), the video
 * IRQ, the IR sub-device ISR, and — for AV core interrupts — deferred
 * work with the mask bit disabled until the work re-enables it.
 * PCI_INT_STAT is acked once at the end when anything was handled.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* Nothing pending for us (shared IRQ line): return unhandled */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Diagnostic decode of the PCI status bits (debug level 7) */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
				PCI_MSK_IR);
	}

	if (cx23885_boards[dev->board].cimax > 0 &&
		((pci_status & PCI_MSK_GPIO0) ||
		 (pci_status & PCI_MSK_GPIO1))) {

		/* NOTE(review): this inner cimax test is redundant — the
		 * outer condition above already guarantees cimax > 0 */
		if (cx23885_boards[dev->board].cimax > 0)
			handled += netup_ci_slot_status(dev, pci_status);

	}

	/* TS port B: DVB or 417 encoder depending on board config */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	/* TS port C: DVB or 417 encoder depending on board config */
	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	/* IR controller: hand off to the IR sub-device's ISR */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* AV core work must run in process context: mask the interrupt
	 * until the deferred work has serviced and re-enabled it */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		if (!schedule_work(&dev->cx25840_work))
			printk(KERN_ERR "%s: failed to set up deferred work for"
			       " AV Core/IR interrupt. Interrupt is disabled"
			       " and won't be re-enabled\n", dev->name);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1876 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1877 unsigned int notification, void *arg)
1879 struct cx23885_dev *dev;
1881 if (sd == NULL)
1882 return;
1884 dev = to_cx23885(sd->v4l2_dev);
1886 switch (notification) {
1887 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1888 if (sd == dev->sd_ir)
1889 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1890 break;
1891 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1892 if (sd == dev->sd_ir)
1893 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1894 break;
/*
 * Initialise the deferred-work handlers (AV core and IR RX/TX) and
 * install cx23885_v4l2_dev_notify() as this v4l2_device's notification
 * callback.  Called early in probe, before sub-devices are set up, so
 * no notification ever arrives without a handler in place.
 */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1906 static inline int encoder_on_portb(struct cx23885_dev *dev)
1908 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1911 static inline int encoder_on_portc(struct cx23885_dev *dev)
1913 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
/* Mask represents 32 different GPIOs, GPIOs are split into multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder (with its own GPIOs) is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
1928 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1930 if (mask & 0x7)
1931 cx_set(GP0_IO, mask & 0x7);
1933 if (mask & 0x0007fff8) {
1934 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1935 printk(KERN_ERR
1936 "%s: Setting GPIO on encoder ports\n",
1937 dev->name);
1938 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1941 /* TODO: 23-19 */
1942 if (mask & 0x00f80000)
1943 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1946 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1948 if (mask & 0x00000007)
1949 cx_clear(GP0_IO, mask & 0x7);
1951 if (mask & 0x0007fff8) {
1952 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1953 printk(KERN_ERR
1954 "%s: Clearing GPIO moving on encoder ports\n",
1955 dev->name);
1956 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1959 /* TODO: 23-19 */
1960 if (mask & 0x00f80000)
1961 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1964 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1966 if (mask & 0x00000007)
1967 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1969 if (mask & 0x0007fff8) {
1970 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1971 printk(KERN_ERR
1972 "%s: Reading GPIO moving on encoder ports\n",
1973 dev->name);
1974 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1977 /* TODO: 23-19 */
1978 if (mask & 0x00f80000)
1979 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1981 return 0;
1984 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1986 if ((mask & 0x00000007) && asoutput)
1987 cx_set(GP0_IO, (mask & 0x7) << 16);
1988 else if ((mask & 0x00000007) && !asoutput)
1989 cx_clear(GP0_IO, (mask & 0x7) << 16);
1991 if (mask & 0x0007fff8) {
1992 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1993 printk(KERN_ERR
1994 "%s: Enabling GPIO on encoder ports\n",
1995 dev->name);
1998 /* MC417_OEN is active low for output, write 1 for an input */
1999 if ((mask & 0x0007fff8) && asoutput)
2000 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2002 else if ((mask & 0x0007fff8) && !asoutput)
2003 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2005 /* TODO: 23-19 */
2008 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
2009 const struct pci_device_id *pci_id)
2011 struct cx23885_dev *dev;
2012 int err;
2014 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2015 if (NULL == dev)
2016 return -ENOMEM;
2018 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2019 if (err < 0)
2020 goto fail_free;
2022 /* Prepare to handle notifications from subdevices */
2023 cx23885_v4l2_dev_notify_init(dev);
2025 /* pci init */
2026 dev->pci = pci_dev;
2027 if (pci_enable_device(pci_dev)) {
2028 err = -EIO;
2029 goto fail_unreg;
2032 if (cx23885_dev_setup(dev) < 0) {
2033 err = -EINVAL;
2034 goto fail_unreg;
2037 /* print pci info */
2038 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
2039 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2040 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
2041 "latency: %d, mmio: 0x%llx\n", dev->name,
2042 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2043 dev->pci_lat,
2044 (unsigned long long)pci_resource_start(pci_dev, 0));
2046 pci_set_master(pci_dev);
2047 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
2048 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2049 err = -EIO;
2050 goto fail_irq;
2053 if (!pci_enable_msi(pci_dev))
2054 err = request_irq(pci_dev->irq, cx23885_irq,
2055 IRQF_DISABLED, dev->name, dev);
2056 else
2057 err = request_irq(pci_dev->irq, cx23885_irq,
2058 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
2059 if (err < 0) {
2060 printk(KERN_ERR "%s: can't get IRQ %d\n",
2061 dev->name, pci_dev->irq);
2062 goto fail_irq;
2065 switch (dev->board) {
2066 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2067 cx23885_irq_add_enable(dev, 0x01800000); /* for NetUP */
2068 break;
2072 * The CX2388[58] IR controller can start firing interrupts when
2073 * enabled, so these have to take place after the cx23885_irq() handler
2074 * is hooked up by the call to request_irq() above.
2076 cx23885_ir_pci_int_enable(dev);
2077 cx23885_input_init(dev);
2079 return 0;
2081 fail_irq:
2082 cx23885_dev_unregister(dev);
2083 fail_unreg:
2084 v4l2_device_unregister(&dev->v4l2_dev);
2085 fail_free:
2086 kfree(dev);
2087 return err;
/*
 * PCI remove entry point: tear down in reverse of probe order —
 * input and IR first (they can raise interrupts), then the hardware
 * shutdown, the PCI device, the IRQ and MSI, and finally the device
 * core, v4l2 registration and the allocation itself.
 */
static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	pci_disable_device(pci_dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);
	pci_disable_msi(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
/* PCI IDs this driver binds to: Conexant (0x14f1) CX23885 and
 * CX23887 rev 2 bridges, any subsystem vendor/device. */
static struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
/* PCI driver registration; power management is not yet implemented. */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = __devexit_p(cx23885_finidev),
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
/*
 * Module init: announce the driver version (and, for development
 * snapshot builds, the snapshot date) and register the PCI driver.
 * Returns the result of pci_register_driver().
 */
static int __init cx23885_init(void)
{
	printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
	       (CX23885_VERSION_CODE >> 16) & 0xff,
	       (CX23885_VERSION_CODE >> 8) & 0xff,
	       CX23885_VERSION_CODE & 0xff);
#ifdef SNAPSHOT
	printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
	       SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
#endif
	return pci_register_driver(&cx23885_pci_driver);
}
/* Module exit: unregister the PCI driver (detaches all devices). */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}

module_init(cx23885_init);
module_exit(cx23885_fini);
2161 /* ----------------------------------------------------------- */