1 /*
2 * Driver for the Conexant CX23885 PCIe bridge
4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
32 #include <linux/firmware.h>
34 #include "cx23885.h"
35 #include "cimax2.h"
36 #include "altera-ci.h"
37 #include "cx23888-ir.h"
38 #include "cx23885-ir.h"
39 #include "cx23885-av.h"
40 #include "cx23885-input.h"
42 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
43 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(CX23885_VERSION);
47 static unsigned int debug;
48 module_param(debug, int, 0644);
49 MODULE_PARM_DESC(debug, "enable debug messages");
51 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
52 module_param_array(card, int, NULL, 0444);
53 MODULE_PARM_DESC(card, "card type");
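/* Example (hypothetical values): "modprobe cx23885 debug=2 card=1,1" raises
 * the debug level to 2 and forces the board type of the first two devices to
 * card type 1; entries left at UNSET fall back to the subsystem-ID
 * autodetection performed in cx23885_dev_setup(). */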
55 #define dprintk(level, fmt, arg...)\
56 do { if (debug >= level)\
57 printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
58 } while (0)
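/* dprintk(level, ...) emits a KERN_DEBUG message prefixed with the device
 * name only when the "debug" module parameter is >= level. */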
60 static unsigned int cx23885_devcount;
62 #define NO_SYNC_LINE (-1U)
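/* Passed as the sync_line argument to cx23885_risc_field() when no
 * RISC_RESYNC instruction should be emitted at the start of the program. */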
64 /* FIXME, these allocations will change when
65  * analog arrives. To be reviewed.
66 * CX23887 Assumptions
67 * 1 line = 16 bytes of CDT
68 * cmds size = 80
69 * cdt size = 16 * linesize
70 * iqsize = 64
71 * maxlines = 6
73 * Address Space:
74 * 0x00000000 0x00008fff FIFO clusters
75 * 0x00010000 0x000104af Channel Management Data Structures
76 * 0x000104b0 0x000104ff Free
77 * 0x00010500 0x000108bf 15 channels * iqsize
78 * 0x000108c0 0x000108ff Free
79 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
80 * 15 channels * (iqsize + (maxlines * linesize))
81 * 0x00010ea0 0x00010xxx Free
84 static struct sram_channel cx23885_sram_channels[] = {
85 [SRAM_CH01] = {
86 .name = "VID A",
87 .cmds_start = 0x10000,
88 .ctrl_start = 0x10380,
89 .cdt = 0x104c0,
90 .fifo_start = 0x40,
91 .fifo_size = 0x2800,
92 .ptr1_reg = DMA1_PTR1,
93 .ptr2_reg = DMA1_PTR2,
94 .cnt1_reg = DMA1_CNT1,
95 .cnt2_reg = DMA1_CNT2,
97 [SRAM_CH02] = {
98 .name = "ch2",
99 .cmds_start = 0x0,
100 .ctrl_start = 0x0,
101 .cdt = 0x0,
102 .fifo_start = 0x0,
103 .fifo_size = 0x0,
104 .ptr1_reg = DMA2_PTR1,
105 .ptr2_reg = DMA2_PTR2,
106 .cnt1_reg = DMA2_CNT1,
107 .cnt2_reg = DMA2_CNT2,
109 [SRAM_CH03] = {
110 .name = "TS1 B",
111 .cmds_start = 0x100A0,
112 .ctrl_start = 0x10400,
113 .cdt = 0x10580,
114 .fifo_start = 0x5000,
115 .fifo_size = 0x1000,
116 .ptr1_reg = DMA3_PTR1,
117 .ptr2_reg = DMA3_PTR2,
118 .cnt1_reg = DMA3_CNT1,
119 .cnt2_reg = DMA3_CNT2,
121 [SRAM_CH04] = {
122 .name = "ch4",
123 .cmds_start = 0x0,
124 .ctrl_start = 0x0,
125 .cdt = 0x0,
126 .fifo_start = 0x0,
127 .fifo_size = 0x0,
128 .ptr1_reg = DMA4_PTR1,
129 .ptr2_reg = DMA4_PTR2,
130 .cnt1_reg = DMA4_CNT1,
131 .cnt2_reg = DMA4_CNT2,
133 [SRAM_CH05] = {
134 .name = "ch5",
135 .cmds_start = 0x0,
136 .ctrl_start = 0x0,
137 .cdt = 0x0,
138 .fifo_start = 0x0,
139 .fifo_size = 0x0,
140 .ptr1_reg = DMA5_PTR1,
141 .ptr2_reg = DMA5_PTR2,
142 .cnt1_reg = DMA5_CNT1,
143 .cnt2_reg = DMA5_CNT2,
145 [SRAM_CH06] = {
146 .name = "TS2 C",
147 .cmds_start = 0x10140,
148 .ctrl_start = 0x10440,
149 .cdt = 0x105e0,
150 .fifo_start = 0x6000,
151 .fifo_size = 0x1000,
152 .ptr1_reg = DMA5_PTR1,
153 .ptr2_reg = DMA5_PTR2,
154 .cnt1_reg = DMA5_CNT1,
155 .cnt2_reg = DMA5_CNT2,
157 [SRAM_CH07] = {
158 .name = "ch7",
159 .cmds_start = 0x0,
160 .ctrl_start = 0x0,
161 .cdt = 0x0,
162 .fifo_start = 0x0,
163 .fifo_size = 0x0,
164 .ptr1_reg = DMA6_PTR1,
165 .ptr2_reg = DMA6_PTR2,
166 .cnt1_reg = DMA6_CNT1,
167 .cnt2_reg = DMA6_CNT2,
169 [SRAM_CH08] = {
170 .name = "ch8",
171 .cmds_start = 0x0,
172 .ctrl_start = 0x0,
173 .cdt = 0x0,
174 .fifo_start = 0x0,
175 .fifo_size = 0x0,
176 .ptr1_reg = DMA7_PTR1,
177 .ptr2_reg = DMA7_PTR2,
178 .cnt1_reg = DMA7_CNT1,
179 .cnt2_reg = DMA7_CNT2,
181 [SRAM_CH09] = {
182 .name = "ch9",
183 .cmds_start = 0x0,
184 .ctrl_start = 0x0,
185 .cdt = 0x0,
186 .fifo_start = 0x0,
187 .fifo_size = 0x0,
188 .ptr1_reg = DMA8_PTR1,
189 .ptr2_reg = DMA8_PTR2,
190 .cnt1_reg = DMA8_CNT1,
191 .cnt2_reg = DMA8_CNT2,
195 static struct sram_channel cx23887_sram_channels[] = {
196 [SRAM_CH01] = {
197 .name = "VID A",
198 .cmds_start = 0x10000,
199 .ctrl_start = 0x105b0,
200 .cdt = 0x107b0,
201 .fifo_start = 0x40,
202 .fifo_size = 0x2800,
203 .ptr1_reg = DMA1_PTR1,
204 .ptr2_reg = DMA1_PTR2,
205 .cnt1_reg = DMA1_CNT1,
206 .cnt2_reg = DMA1_CNT2,
208 [SRAM_CH02] = {
209 .name = "ch2",
210 .cmds_start = 0x0,
211 .ctrl_start = 0x0,
212 .cdt = 0x0,
213 .fifo_start = 0x0,
214 .fifo_size = 0x0,
215 .ptr1_reg = DMA2_PTR1,
216 .ptr2_reg = DMA2_PTR2,
217 .cnt1_reg = DMA2_CNT1,
218 .cnt2_reg = DMA2_CNT2,
220 [SRAM_CH03] = {
221 .name = "TS1 B",
222 .cmds_start = 0x100A0,
223 .ctrl_start = 0x10630,
224 .cdt = 0x10870,
225 .fifo_start = 0x5000,
226 .fifo_size = 0x1000,
227 .ptr1_reg = DMA3_PTR1,
228 .ptr2_reg = DMA3_PTR2,
229 .cnt1_reg = DMA3_CNT1,
230 .cnt2_reg = DMA3_CNT2,
232 [SRAM_CH04] = {
233 .name = "ch4",
234 .cmds_start = 0x0,
235 .ctrl_start = 0x0,
236 .cdt = 0x0,
237 .fifo_start = 0x0,
238 .fifo_size = 0x0,
239 .ptr1_reg = DMA4_PTR1,
240 .ptr2_reg = DMA4_PTR2,
241 .cnt1_reg = DMA4_CNT1,
242 .cnt2_reg = DMA4_CNT2,
244 [SRAM_CH05] = {
245 .name = "ch5",
246 .cmds_start = 0x0,
247 .ctrl_start = 0x0,
248 .cdt = 0x0,
249 .fifo_start = 0x0,
250 .fifo_size = 0x0,
251 .ptr1_reg = DMA5_PTR1,
252 .ptr2_reg = DMA5_PTR2,
253 .cnt1_reg = DMA5_CNT1,
254 .cnt2_reg = DMA5_CNT2,
256 [SRAM_CH06] = {
257 .name = "TS2 C",
258 .cmds_start = 0x10140,
259 .ctrl_start = 0x10670,
260 .cdt = 0x108d0,
261 .fifo_start = 0x6000,
262 .fifo_size = 0x1000,
263 .ptr1_reg = DMA5_PTR1,
264 .ptr2_reg = DMA5_PTR2,
265 .cnt1_reg = DMA5_CNT1,
266 .cnt2_reg = DMA5_CNT2,
268 [SRAM_CH07] = {
269 .name = "ch7",
270 .cmds_start = 0x0,
271 .ctrl_start = 0x0,
272 .cdt = 0x0,
273 .fifo_start = 0x0,
274 .fifo_size = 0x0,
275 .ptr1_reg = DMA6_PTR1,
276 .ptr2_reg = DMA6_PTR2,
277 .cnt1_reg = DMA6_CNT1,
278 .cnt2_reg = DMA6_CNT2,
280 [SRAM_CH08] = {
281 .name = "ch8",
282 .cmds_start = 0x0,
283 .ctrl_start = 0x0,
284 .cdt = 0x0,
285 .fifo_start = 0x0,
286 .fifo_size = 0x0,
287 .ptr1_reg = DMA7_PTR1,
288 .ptr2_reg = DMA7_PTR2,
289 .cnt1_reg = DMA7_CNT1,
290 .cnt2_reg = DMA7_CNT2,
292 [SRAM_CH09] = {
293 .name = "ch9",
294 .cmds_start = 0x0,
295 .ctrl_start = 0x0,
296 .cdt = 0x0,
297 .fifo_start = 0x0,
298 .fifo_size = 0x0,
299 .ptr1_reg = DMA8_PTR1,
300 .ptr2_reg = DMA8_PTR2,
301 .cnt1_reg = DMA8_CNT1,
302 .cnt2_reg = DMA8_CNT2,
306 void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
308 unsigned long flags;
309 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
311 dev->pci_irqmask |= mask;
313 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
316 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
318 unsigned long flags;
319 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
321 dev->pci_irqmask |= mask;
322 cx_set(PCI_INT_MSK, mask);
324 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
327 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
329 u32 v;
330 unsigned long flags;
331 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
333 v = mask & dev->pci_irqmask;
334 if (v)
335 cx_set(PCI_INT_MSK, v);
337 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
340 static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
342 cx23885_irq_enable(dev, 0xffffffff);
345 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
347 unsigned long flags;
348 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
350 cx_clear(PCI_INT_MSK, mask);
352 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
355 static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
357 cx23885_irq_disable(dev, 0xffffffff);
360 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
362 unsigned long flags;
363 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
365 dev->pci_irqmask &= ~mask;
366 cx_clear(PCI_INT_MSK, mask);
368 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
371 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
373 u32 v;
374 unsigned long flags;
375 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
377 v = cx_read(PCI_INT_MSK);
379 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
380 return v;
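/* Pretty-print a single RISC instruction and return how many dwords it
 * occupies, so callers can step through a whole RISC program. */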
383 static int cx23885_risc_decode(u32 risc)
385 static char *instr[16] = {
386 [RISC_SYNC >> 28] = "sync",
387 [RISC_WRITE >> 28] = "write",
388 [RISC_WRITEC >> 28] = "writec",
389 [RISC_READ >> 28] = "read",
390 [RISC_READC >> 28] = "readc",
391 [RISC_JUMP >> 28] = "jump",
392 [RISC_SKIP >> 28] = "skip",
393 [RISC_WRITERM >> 28] = "writerm",
394 [RISC_WRITECM >> 28] = "writecm",
395 [RISC_WRITECR >> 28] = "writecr",
397 static int incr[16] = {
398 [RISC_WRITE >> 28] = 3,
399 [RISC_JUMP >> 28] = 3,
400 [RISC_SKIP >> 28] = 1,
401 [RISC_SYNC >> 28] = 1,
402 [RISC_WRITERM >> 28] = 3,
403 [RISC_WRITECM >> 28] = 3,
404 [RISC_WRITECR >> 28] = 4,
406 static char *bits[] = {
407 "12", "13", "14", "resync",
408 "cnt0", "cnt1", "18", "19",
409 "20", "21", "22", "23",
410 "irq1", "irq2", "eol", "sol",
412 int i;
414 printk("0x%08x [ %s", risc,
415 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
416 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
417 if (risc & (1 << (i + 12)))
418 printk(" %s", bits[i]);
419 printk(" count=%d ]\n", risc & 0xfff);
420 return incr[risc >> 28] ? incr[risc >> 28] : 1;
423 void cx23885_wakeup(struct cx23885_tsport *port,
424 struct cx23885_dmaqueue *q, u32 count)
426 struct cx23885_dev *dev = port->dev;
427 struct cx23885_buffer *buf;
428 int bc;
430 for (bc = 0;; bc++) {
431 if (list_empty(&q->active))
432 break;
433 buf = list_entry(q->active.next,
434 struct cx23885_buffer, vb.queue);
436         /* count comes from the hw and is 16-bit wide --
437 * this trick handles wrap-arounds correctly for
438 * up to 32767 buffers in flight... */
439 if ((s16) (count - buf->count) < 0)
440 break;
442 do_gettimeofday(&buf->vb.ts);
443 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
444 count, buf->count);
445 buf->vb.state = VIDEOBUF_DONE;
446 list_del(&buf->vb.queue);
447 wake_up(&buf->vb.done);
449 if (list_empty(&q->active))
450 del_timer(&q->timeout);
451 else
452 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
453 if (bc != 1)
454 printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
455 __func__, bc);
458 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
459 struct sram_channel *ch,
460 unsigned int bpl, u32 risc)
462 unsigned int i, lines;
463 u32 cdt;
465 if (ch->cmds_start == 0) {
466 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
467 ch->name);
468 cx_write(ch->ptr1_reg, 0);
469 cx_write(ch->ptr2_reg, 0);
470 cx_write(ch->cnt2_reg, 0);
471 cx_write(ch->cnt1_reg, 0);
472 return 0;
473 } else {
474 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
475 ch->name);
478 bpl = (bpl + 7) & ~7; /* alignment */
479 cdt = ch->cdt;
480 lines = ch->fifo_size / bpl;
481 if (lines > 6)
482 lines = 6;
483 BUG_ON(lines < 2);
485 cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
486 cx_write(8 + 4, 8);
487 cx_write(8 + 8, 0);
489 /* write CDT */
490 for (i = 0; i < lines; i++) {
491 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
492 ch->fifo_start + bpl*i);
493 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
494 cx_write(cdt + 16*i + 4, 0);
495 cx_write(cdt + 16*i + 8, 0);
496 cx_write(cdt + 16*i + 12, 0);
499 /* write CMDS */
500 if (ch->jumponly)
501 cx_write(ch->cmds_start + 0, 8);
502 else
503 cx_write(ch->cmds_start + 0, risc);
504 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
505 cx_write(ch->cmds_start + 8, cdt);
506 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
507 cx_write(ch->cmds_start + 16, ch->ctrl_start);
508 if (ch->jumponly)
509 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
510 else
511 cx_write(ch->cmds_start + 20, 64 >> 2);
512 for (i = 24; i < 80; i += 4)
513 cx_write(ch->cmds_start + i, 0);
515 /* fill registers */
516 cx_write(ch->ptr1_reg, ch->fifo_start);
517 cx_write(ch->ptr2_reg, cdt);
518 cx_write(ch->cnt2_reg, (lines*16) >> 3);
519 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
521 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
522 dev->bridge,
523 ch->name,
524 bpl,
525 lines);
527 return 0;
530 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
531 struct sram_channel *ch)
533 static char *name[] = {
534 "init risc lo",
535 "init risc hi",
536 "cdt base",
537 "cdt size",
538 "iq base",
539 "iq size",
540 "risc pc lo",
541 "risc pc hi",
542 "iq wr ptr",
543 "iq rd ptr",
544 "cdt current",
545 "pci target lo",
546 "pci target hi",
547 "line / byte",
549 u32 risc;
550 unsigned int i, j, n;
552 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
553 dev->name, ch->name);
554 for (i = 0; i < ARRAY_SIZE(name); i++)
555 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
556 dev->name, name[i],
557 cx_read(ch->cmds_start + 4*i));
559 for (i = 0; i < 4; i++) {
560 risc = cx_read(ch->cmds_start + 4 * (i + 14));
561 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
562 cx23885_risc_decode(risc);
564 for (i = 0; i < (64 >> 2); i += n) {
565 risc = cx_read(ch->ctrl_start + 4 * i);
566 /* No consideration for bits 63-32 */
568 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
569 ch->ctrl_start + 4 * i, i);
570 n = cx23885_risc_decode(risc);
571 for (j = 1; j < n; j++) {
572 risc = cx_read(ch->ctrl_start + 4 * (i + j));
573 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
574 dev->name, i+j, risc, j);
578 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
579 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
580 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
581 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
582 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
583 dev->name, cx_read(ch->ptr1_reg));
584 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
585 dev->name, cx_read(ch->ptr2_reg));
586 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
587 dev->name, cx_read(ch->cnt1_reg));
588 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
589 dev->name, cx_read(ch->cnt2_reg));
592 static void cx23885_risc_disasm(struct cx23885_tsport *port,
593 struct btcx_riscmem *risc)
595 struct cx23885_dev *dev = port->dev;
596 unsigned int i, j, n;
598 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
599 dev->name, risc->cpu, (unsigned long)risc->dma);
600 for (i = 0; i < (risc->size >> 2); i += n) {
601 printk(KERN_INFO "%s: %04d: ", dev->name, i);
602 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
603 for (j = 1; j < n; j++)
604 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
605 dev->name, i + j, risc->cpu[i + j], j);
606 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
607 break;
611 static void cx23885_shutdown(struct cx23885_dev *dev)
613 /* disable RISC controller */
614 cx_write(DEV_CNTRL2, 0);
616 /* Disable all IR activity */
617 cx_write(IR_CNTRL_REG, 0);
619 /* Disable Video A/B activity */
620 cx_write(VID_A_DMA_CTL, 0);
621 cx_write(VID_B_DMA_CTL, 0);
622 cx_write(VID_C_DMA_CTL, 0);
624 /* Disable Audio activity */
625 cx_write(AUD_INT_DMA_CTL, 0);
626 cx_write(AUD_EXT_DMA_CTL, 0);
628 /* Disable Serial port */
629 cx_write(UART_CTL, 0);
631 /* Disable Interrupts */
632 cx23885_irq_disable_all(dev);
633 cx_write(VID_A_INT_MSK, 0);
634 cx_write(VID_B_INT_MSK, 0);
635 cx_write(VID_C_INT_MSK, 0);
636 cx_write(AUDIO_INT_INT_MSK, 0);
637 cx_write(AUDIO_EXT_INT_MSK, 0);
641 static void cx23885_reset(struct cx23885_dev *dev)
643 dprintk(1, "%s()\n", __func__);
645 cx23885_shutdown(dev);
647 cx_write(PCI_INT_STAT, 0xffffffff);
648 cx_write(VID_A_INT_STAT, 0xffffffff);
649 cx_write(VID_B_INT_STAT, 0xffffffff);
650 cx_write(VID_C_INT_STAT, 0xffffffff);
651 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
652 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
653 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
654 cx_write(PAD_CTRL, 0x00500300);
656 mdelay(100);
658 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
659 720*4, 0);
660 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
661 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
662 188*4, 0);
663 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
664 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
665 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
666 188*4, 0);
667 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
668 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
669 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
671 cx23885_gpio_setup(dev);
675 static int cx23885_pci_quirks(struct cx23885_dev *dev)
677 dprintk(1, "%s()\n", __func__);
679 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
680 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
681 * occur on the cx23887 bridge.
683 if (dev->bridge == CX23885_BRIDGE_885)
684 cx_clear(RDR_TLCTL0, 1 << 4);
686 return 0;
689 static int get_resources(struct cx23885_dev *dev)
691 if (request_mem_region(pci_resource_start(dev->pci, 0),
692 pci_resource_len(dev->pci, 0),
693 dev->name))
694 return 0;
696 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
697 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
699 return -EBUSY;
702 static void cx23885_timeout(unsigned long data);
703 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
704 u32 reg, u32 mask, u32 value);
706 static int cx23885_init_tsport(struct cx23885_dev *dev,
707 struct cx23885_tsport *port, int portno)
709 dprintk(1, "%s(portno=%d)\n", __func__, portno);
711 /* Transport bus init dma queue - Common settings */
712 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
713 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
714 port->vld_misc_val = 0x0;
715 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
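/* 0x47 is the MPEG-2 TS sync byte and 188 the TS packet length in bytes;
 * the combined value is written to the port's HW_SOP_CTL register when
 * DMA is started. */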
717 spin_lock_init(&port->slock);
718 port->dev = dev;
719 port->nr = portno;
721 INIT_LIST_HEAD(&port->mpegq.active);
722 INIT_LIST_HEAD(&port->mpegq.queued);
723 port->mpegq.timeout.function = cx23885_timeout;
724 port->mpegq.timeout.data = (unsigned long)port;
725 init_timer(&port->mpegq.timeout);
727 mutex_init(&port->frontends.lock);
728 INIT_LIST_HEAD(&port->frontends.felist);
729 port->frontends.active_fe_id = 0;
731         /* This should be hardcoded to allow a single frontend
732 * attachment to this tsport, keeping the -dvb.c
733 * code clean and safe.
735 if (!port->num_frontends)
736 port->num_frontends = 1;
738 switch (portno) {
739 case 1:
740 port->reg_gpcnt = VID_B_GPCNT;
741 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
742 port->reg_dma_ctl = VID_B_DMA_CTL;
743 port->reg_lngth = VID_B_LNGTH;
744 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
745 port->reg_gen_ctrl = VID_B_GEN_CTL;
746 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
747 port->reg_sop_status = VID_B_SOP_STATUS;
748 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
749 port->reg_vld_misc = VID_B_VLD_MISC;
750 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
751 port->reg_src_sel = VID_B_SRC_SEL;
752 port->reg_ts_int_msk = VID_B_INT_MSK;
753 port->reg_ts_int_stat = VID_B_INT_STAT;
754 port->sram_chno = SRAM_CH03; /* VID_B */
755 port->pci_irqmask = 0x02; /* VID_B bit1 */
756 break;
757 case 2:
758 port->reg_gpcnt = VID_C_GPCNT;
759 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
760 port->reg_dma_ctl = VID_C_DMA_CTL;
761 port->reg_lngth = VID_C_LNGTH;
762 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
763 port->reg_gen_ctrl = VID_C_GEN_CTL;
764 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
765 port->reg_sop_status = VID_C_SOP_STATUS;
766 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
767 port->reg_vld_misc = VID_C_VLD_MISC;
768 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
769 port->reg_src_sel = 0;
770 port->reg_ts_int_msk = VID_C_INT_MSK;
771 port->reg_ts_int_stat = VID_C_INT_STAT;
772 port->sram_chno = SRAM_CH06; /* VID_C */
773 port->pci_irqmask = 0x04; /* VID_C bit2 */
774 break;
775 default:
776 BUG();
779 cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
780 port->reg_dma_ctl, port->dma_ctl_val, 0x00);
782 return 0;
785 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
787 switch (cx_read(RDR_CFG2) & 0xff) {
788 case 0x00:
789 /* cx23885 */
790 dev->hwrevision = 0xa0;
791 break;
792 case 0x01:
793 /* CX23885-12Z */
794 dev->hwrevision = 0xa1;
795 break;
796 case 0x02:
797 /* CX23885-13Z/14Z */
798 dev->hwrevision = 0xb0;
799 break;
800 case 0x03:
801 if (dev->pci->device == 0x8880) {
802 /* CX23888-21Z/22Z */
803 dev->hwrevision = 0xc0;
804 } else {
805 /* CX23885-14Z */
806 dev->hwrevision = 0xa4;
808 break;
809 case 0x04:
810 if (dev->pci->device == 0x8880) {
811 /* CX23888-31Z */
812 dev->hwrevision = 0xd0;
813 } else {
814 /* CX23885-15Z, CX23888-31Z */
815 dev->hwrevision = 0xa5;
817 break;
818 case 0x0e:
819 /* CX23887-15Z */
820 dev->hwrevision = 0xc0;
821 break;
822 case 0x0f:
823 /* CX23887-14Z */
824 dev->hwrevision = 0xb1;
825 break;
826 default:
827 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
828 __func__, dev->hwrevision);
830 if (dev->hwrevision)
831 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
832 __func__, dev->hwrevision);
833 else
834 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
835 __func__, dev->hwrevision);
838 /* Find the first v4l2_subdev member of the group id in hw */
839 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
841 struct v4l2_subdev *result = NULL;
842 struct v4l2_subdev *sd;
844 spin_lock(&dev->v4l2_dev.lock);
845 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
846 if (sd->grp_id == hw) {
847 result = sd;
848 break;
851 spin_unlock(&dev->v4l2_dev.lock);
852 return result;
855 static int cx23885_dev_setup(struct cx23885_dev *dev)
857 int i;
859 spin_lock_init(&dev->pci_irqmask_lock);
861 mutex_init(&dev->lock);
862 mutex_init(&dev->gpio_lock);
864 atomic_inc(&dev->refcount);
866 dev->nr = cx23885_devcount++;
867 sprintf(dev->name, "cx23885[%d]", dev->nr);
869 /* Configure the internal memory */
870 if (dev->pci->device == 0x8880) {
871 /* Could be 887 or 888, assume a default */
872 dev->bridge = CX23885_BRIDGE_887;
873 /* Apply a sensible clock frequency for the PCIe bridge */
874 dev->clk_freq = 25000000;
875 dev->sram_channels = cx23887_sram_channels;
876 } else
877 if (dev->pci->device == 0x8852) {
878 dev->bridge = CX23885_BRIDGE_885;
879 /* Apply a sensible clock frequency for the PCIe bridge */
880 dev->clk_freq = 28000000;
881 dev->sram_channels = cx23885_sram_channels;
882 } else
883 BUG();
885 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
886 __func__, dev->bridge);
888 /* board config */
889 dev->board = UNSET;
890 if (card[dev->nr] < cx23885_bcount)
891 dev->board = card[dev->nr];
892 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
893 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
894 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
895 dev->board = cx23885_subids[i].card;
896 if (UNSET == dev->board) {
897 dev->board = CX23885_BOARD_UNKNOWN;
898 cx23885_card_list(dev);
901     /* If the user specified a clk freq override, apply it */
902 if (cx23885_boards[dev->board].clk_freq > 0)
903 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
905 dev->pci_bus = dev->pci->bus->number;
906 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
907 cx23885_irq_add(dev, 0x001f00);
909 /* External Master 1 Bus */
910 dev->i2c_bus[0].nr = 0;
911 dev->i2c_bus[0].dev = dev;
912 dev->i2c_bus[0].reg_stat = I2C1_STAT;
913 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
914 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
915 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
916 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
917 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
919 /* External Master 2 Bus */
920 dev->i2c_bus[1].nr = 1;
921 dev->i2c_bus[1].dev = dev;
922 dev->i2c_bus[1].reg_stat = I2C2_STAT;
923 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
924 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
925 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
926 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
927 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
929 /* Internal Master 3 Bus */
930 dev->i2c_bus[2].nr = 2;
931 dev->i2c_bus[2].dev = dev;
932 dev->i2c_bus[2].reg_stat = I2C3_STAT;
933 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
934 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
935 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
936 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
937 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
939 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
940 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
941 cx23885_init_tsport(dev, &dev->ts1, 1);
943 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
944 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
945 cx23885_init_tsport(dev, &dev->ts2, 2);
947 if (get_resources(dev) < 0) {
948 printk(KERN_ERR "CORE %s No more PCIe resources for "
949 "subsystem: %04x:%04x\n",
950 dev->name, dev->pci->subsystem_vendor,
951 dev->pci->subsystem_device);
953 cx23885_devcount--;
954 return -ENODEV;
957 /* PCIe stuff */
958 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
959 pci_resource_len(dev->pci, 0));
961 dev->bmmio = (u8 __iomem *)dev->lmmio;
963 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
964 dev->name, dev->pci->subsystem_vendor,
965 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
966 dev->board, card[dev->nr] == dev->board ?
967 "insmod option" : "autodetected");
969 cx23885_pci_quirks(dev);
971 /* Assume some sensible defaults */
972 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
973 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
974 dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
975 dev->radio_type = cx23885_boards[dev->board].radio_type;
976 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
978 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
979 __func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
980 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
981 __func__, dev->radio_type, dev->radio_addr);
983 /* The cx23417 encoder has GPIO's that need to be initialised
984 * before DVB, so that demodulators and tuners are out of
985 * reset before DVB uses them.
987 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
988 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
989 cx23885_mc417_init(dev);
991 /* init hardware */
992 cx23885_reset(dev);
994 cx23885_i2c_register(&dev->i2c_bus[0]);
995 cx23885_i2c_register(&dev->i2c_bus[1]);
996 cx23885_i2c_register(&dev->i2c_bus[2]);
997 cx23885_card_setup(dev);
998 call_all(dev, core, s_power, 0);
999 cx23885_ir_init(dev);
1001 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1002 if (cx23885_video_register(dev) < 0) {
1003 printk(KERN_ERR "%s() Failed to register analog "
1004 "video adapters on VID_A\n", __func__);
1008 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1009 if (cx23885_boards[dev->board].num_fds_portb)
1010 dev->ts1.num_frontends =
1011 cx23885_boards[dev->board].num_fds_portb;
1012 if (cx23885_dvb_register(&dev->ts1) < 0) {
1013 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
1014 __func__);
1016 } else
1017 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1018 if (cx23885_417_register(dev) < 0) {
1019 printk(KERN_ERR
1020 "%s() Failed to register 417 on VID_B\n",
1021 __func__);
1025 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1026 if (cx23885_boards[dev->board].num_fds_portc)
1027 dev->ts2.num_frontends =
1028 cx23885_boards[dev->board].num_fds_portc;
1029 if (cx23885_dvb_register(&dev->ts2) < 0) {
1030 printk(KERN_ERR
1031 "%s() Failed to register dvb on VID_C\n",
1032 __func__);
1034 } else
1035 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1036 if (cx23885_417_register(dev) < 0) {
1037 printk(KERN_ERR
1038 "%s() Failed to register 417 on VID_C\n",
1039 __func__);
1043 cx23885_dev_checkrevision(dev);
1045     /* disable MSI for NetUP cards, otherwise CI does not work */
1046 if (cx23885_boards[dev->board].ci_type > 0)
1047 cx_clear(RDR_RDRCTL1, 1 << 8);
1049 return 0;
1052 static void cx23885_dev_unregister(struct cx23885_dev *dev)
1054 release_mem_region(pci_resource_start(dev->pci, 0),
1055 pci_resource_len(dev->pci, 0));
1057 if (!atomic_dec_and_test(&dev->refcount))
1058 return;
1060 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1061 cx23885_video_unregister(dev);
1063 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1064 cx23885_dvb_unregister(&dev->ts1);
1066 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1067 cx23885_417_unregister(dev);
1069 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1070 cx23885_dvb_unregister(&dev->ts2);
1072 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1073 cx23885_417_unregister(dev);
1075 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1076 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1077 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1079 iounmap(dev->lmmio);
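/* Build one field's RISC DMA program: emit an optional resync, then one
 * RISC_WRITE (tagged SOL/EOL) per scanline, splitting a line into several
 * writes whenever it crosses a scatter-gather chunk boundary. */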
1082 static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1083 unsigned int offset, u32 sync_line,
1084 unsigned int bpl, unsigned int padding,
1085 unsigned int lines)
1087 struct scatterlist *sg;
1088 unsigned int line, todo;
1090 /* sync instruction */
1091 if (sync_line != NO_SYNC_LINE)
1092 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1094 /* scan lines */
1095 sg = sglist;
1096 for (line = 0; line < lines; line++) {
1097 while (offset && offset >= sg_dma_len(sg)) {
1098 offset -= sg_dma_len(sg);
1099 sg++;
1101 if (bpl <= sg_dma_len(sg)-offset) {
1102 /* fits into current chunk */
1103 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
1104 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1105 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1106 offset += bpl;
1107 } else {
1108 /* scanline needs to be split */
1109 todo = bpl;
1110 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
1111 (sg_dma_len(sg)-offset));
1112 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1113 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1114 todo -= (sg_dma_len(sg)-offset);
1115 offset = 0;
1116 sg++;
1117 while (todo > sg_dma_len(sg)) {
1118 *(rp++) = cpu_to_le32(RISC_WRITE|
1119 sg_dma_len(sg));
1120 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1121 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1122 todo -= sg_dma_len(sg);
1123 sg++;
1125 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1126 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1127 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1128 offset += todo;
1130 offset += padding;
1133 return rp;
1136 int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
1137 struct scatterlist *sglist, unsigned int top_offset,
1138 unsigned int bottom_offset, unsigned int bpl,
1139 unsigned int padding, unsigned int lines)
1141 u32 instructions, fields;
1142 __le32 *rp;
1143 int rc;
1145 fields = 0;
1146 if (UNSET != top_offset)
1147 fields++;
1148 if (UNSET != bottom_offset)
1149 fields++;
1151 /* estimate risc mem: worst case is one write per page border +
1152 one write per scan line + syncs + jump (all 2 dwords). Padding
1153 can cause next bpl to start close to a page border. First DMA
1154 region may be smaller than PAGE_SIZE */
1155        write and jump need an extra dword */
1156 instructions = fields * (1 + ((bpl + padding) * lines)
1157 / PAGE_SIZE + lines);
1158 instructions += 2;
1159 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1160 if (rc < 0)
1161 return rc;
1163 /* write risc instructions */
1164 rp = risc->cpu;
1165 if (UNSET != top_offset)
1166 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1167 bpl, padding, lines);
1168 if (UNSET != bottom_offset)
1169 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1170 bpl, padding, lines);
1172 /* save pointer to jmp instruction address */
1173 risc->jmp = rp;
1174 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1175 return 0;
1178 static int cx23885_risc_databuffer(struct pci_dev *pci,
1179 struct btcx_riscmem *risc,
1180 struct scatterlist *sglist,
1181 unsigned int bpl,
1182 unsigned int lines)
1184 u32 instructions;
1185 __le32 *rp;
1186 int rc;
1188 /* estimate risc mem: worst case is one write per page border +
1189 one write per scan line + syncs + jump (all 2 dwords). Here
1190 there is no padding and no sync. First DMA region may be smaller
1191 than PAGE_SIZE */
1192 /* Jump and write need an extra dword */
1193 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1194 instructions += 1;
1196 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1197 if (rc < 0)
1198 return rc;
1200 /* write risc instructions */
1201 rp = risc->cpu;
1202 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);
1204 /* save pointer to jmp instruction address */
1205 risc->jmp = rp;
1206 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1207 return 0;
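/* Build a small "stopper" program: a masked register write (used here to
 * clear the port's DMA control bits, raising IRQ2) followed by a jump back
 * to its own start, so the RISC engine parks once the last queued buffer
 * completes. */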
1210 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
1211 u32 reg, u32 mask, u32 value)
1213 __le32 *rp;
1214 int rc;
1216 rc = btcx_riscmem_alloc(pci, risc, 4*16);
1217 if (rc < 0)
1218 return rc;
1220 /* write risc instructions */
1221 rp = risc->cpu;
1222 *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
1223 *(rp++) = cpu_to_le32(reg);
1224 *(rp++) = cpu_to_le32(value);
1225 *(rp++) = cpu_to_le32(mask);
1226 *(rp++) = cpu_to_le32(RISC_JUMP);
1227 *(rp++) = cpu_to_le32(risc->dma);
1228 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1229 return 0;
1232 void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
1234 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
1236 BUG_ON(in_interrupt());
1237 videobuf_waiton(q, &buf->vb, 0, 0);
1238 videobuf_dma_unmap(q->dev, dma);
1239 videobuf_dma_free(dma);
1240 btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
1241 buf->vb.state = VIDEOBUF_NEEDS_INIT;
1244 static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1246 struct cx23885_dev *dev = port->dev;
1248 dprintk(1, "%s() Register Dump\n", __func__);
1249 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
1250 cx_read(DEV_CNTRL2));
1251 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
1252 cx23885_irq_get_mask(dev));
1253 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
1254 cx_read(AUDIO_INT_INT_MSK));
1255 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
1256 cx_read(AUD_INT_DMA_CTL));
1257 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
1258 cx_read(AUDIO_EXT_INT_MSK));
1259 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
1260 cx_read(AUD_EXT_DMA_CTL));
1261 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
1262 cx_read(PAD_CTRL));
1263 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
1264 cx_read(ALT_PIN_OUT_SEL));
1265 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
1266 cx_read(GPIO2));
1267 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
1268 port->reg_gpcnt, cx_read(port->reg_gpcnt));
1269 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
1270 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
1271 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
1272 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
1273 if (port->reg_src_sel)
1274 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1275 port->reg_src_sel, cx_read(port->reg_src_sel));
1276 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
1277 port->reg_lngth, cx_read(port->reg_lngth));
1278 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
1279 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
1280 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
1281 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
1282 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
1283 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
1284 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
1285 port->reg_sop_status, cx_read(port->reg_sop_status));
1286 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
1287 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
1288 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
1289 port->reg_vld_misc, cx_read(port->reg_vld_misc));
1290 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
1291 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
1292 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
1293 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1296 static int cx23885_start_dma(struct cx23885_tsport *port,
1297 struct cx23885_dmaqueue *q,
1298 struct cx23885_buffer *buf)
1300 struct cx23885_dev *dev = port->dev;
1301 u32 reg;
1303 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1304 buf->vb.width, buf->vb.height, buf->vb.field);
1306 /* Stop the fifo and risc engine for this port */
1307 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1309 /* setup fifo + format */
1310 cx23885_sram_channel_setup(dev,
1311 &dev->sram_channels[port->sram_chno],
1312 port->ts_packet_size, buf->risc.dma);
1313 if (debug > 5) {
1314 cx23885_sram_channel_dump(dev,
1315 &dev->sram_channels[port->sram_chno]);
1316 cx23885_risc_disasm(port, &buf->risc);
1319 /* write TS length to chip */
1320 cx_write(port->reg_lngth, buf->vb.width);
1322 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1323 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1324 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1325 __func__,
1326 cx23885_boards[dev->board].portb,
1327 cx23885_boards[dev->board].portc);
1328 return -EINVAL;
1331 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1332 cx23885_av_clk(dev, 0);
1334 udelay(100);
1336 /* If the port supports SRC SELECT, configure it */
1337 if (port->reg_src_sel)
1338 cx_write(port->reg_src_sel, port->src_sel_val);
1340 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1341 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1342 cx_write(port->reg_vld_misc, port->vld_misc_val);
1343 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1344 udelay(100);
1346 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1347 /* reset counter to zero */
1348 cx_write(port->reg_gpcnt_ctl, 3);
1349 q->count = 1;
1351 /* Set VIDB pins to input */
1352 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1353 reg = cx_read(PAD_CTRL);
1354 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1355 cx_write(PAD_CTRL, reg);
1358 /* Set VIDC pins to input */
1359 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1360 reg = cx_read(PAD_CTRL);
1361 reg &= ~0x4; /* Clear TS2_SOP_OE */
1362 cx_write(PAD_CTRL, reg);
1365 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1367 reg = cx_read(PAD_CTRL);
1368 reg = reg & ~0x1; /* Clear TS1_OE */
1370 /* FIXME, bit 2 writing here is questionable */
1371 /* set TS1_SOP_OE and TS1_OE_HI */
1372 reg = reg | 0xa;
1373 cx_write(PAD_CTRL, reg);
1375         /* FIXME: these two registers should be documented. */
1376 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1377 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1380 switch (dev->bridge) {
1381 case CX23885_BRIDGE_885:
1382 case CX23885_BRIDGE_887:
1383 case CX23885_BRIDGE_888:
1384 /* enable irqs */
1385 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1386 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1387 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1388 cx23885_irq_add(dev, port->pci_irqmask);
1389 cx23885_irq_enable_all(dev);
1390 break;
1391 default:
1392 BUG();
1395 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1397 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1398 cx23885_av_clk(dev, 1);
1400 if (debug > 4)
1401 cx23885_tsport_reg_dump(port);
1403 return 0;
1406 static int cx23885_stop_dma(struct cx23885_tsport *port)
1408 struct cx23885_dev *dev = port->dev;
1409 u32 reg;
1411 dprintk(1, "%s()\n", __func__);
1413 /* Stop interrupts and DMA */
1414 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1415 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1417 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1419 reg = cx_read(PAD_CTRL);
1421 /* Set TS1_OE */
1422 reg = reg | 0x1;
1424 /* clear TS1_SOP_OE and TS1_OE_HI */
1425 reg = reg & ~0xa;
1426 cx_write(PAD_CTRL, reg);
1427 cx_write(port->reg_src_sel, 0);
1428 cx_write(port->reg_gen_ctrl, 8);
1432 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1433 cx23885_av_clk(dev, 0);
1435 return 0;
1438 int cx23885_restart_queue(struct cx23885_tsport *port,
1439 struct cx23885_dmaqueue *q)
1441 struct cx23885_dev *dev = port->dev;
1442 struct cx23885_buffer *buf;
1444 dprintk(5, "%s()\n", __func__);
1445 if (list_empty(&q->active)) {
1446 struct cx23885_buffer *prev;
1447 prev = NULL;
1449 dprintk(5, "%s() queue is empty\n", __func__);
1451 for (;;) {
1452 if (list_empty(&q->queued))
1453 return 0;
1454 buf = list_entry(q->queued.next, struct cx23885_buffer,
1455 vb.queue);
1456 if (NULL == prev) {
1457 list_del(&buf->vb.queue);
1458 list_add_tail(&buf->vb.queue, &q->active);
1459 cx23885_start_dma(port, q, buf);
1460 buf->vb.state = VIDEOBUF_ACTIVE;
1461 buf->count = q->count++;
1462 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1463 dprintk(5, "[%p/%d] restart_queue - f/active\n",
1464 buf, buf->vb.i);
1466 } else if (prev->vb.width == buf->vb.width &&
1467 prev->vb.height == buf->vb.height &&
1468 prev->fmt == buf->fmt) {
1469 list_del(&buf->vb.queue);
1470 list_add_tail(&buf->vb.queue, &q->active);
1471 buf->vb.state = VIDEOBUF_ACTIVE;
1472 buf->count = q->count++;
1473 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1474 /* 64 bit bits 63-32 */
1475 prev->risc.jmp[2] = cpu_to_le32(0);
1476 dprintk(5, "[%p/%d] restart_queue - m/active\n",
1477 buf, buf->vb.i);
1478 } else {
1479 return 0;
1481 prev = buf;
1483 return 0;
1486 buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1487 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1488 buf, buf->vb.i);
1489 cx23885_start_dma(port, q, buf);
1490 list_for_each_entry(buf, &q->active, vb.queue)
1491 buf->count = q->count++;
1492 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
1493 return 0;
1496 /* ------------------------------------------------------------------ */
1498 int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
1499 struct cx23885_buffer *buf, enum v4l2_field field)
1501 struct cx23885_dev *dev = port->dev;
1502 int size = port->ts_packet_size * port->ts_packet_count;
1503 int rc;
1505 dprintk(1, "%s: %p\n", __func__, buf);
1506 if (0 != buf->vb.baddr && buf->vb.bsize < size)
1507 return -EINVAL;
1509 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
1510 buf->vb.width = port->ts_packet_size;
1511 buf->vb.height = port->ts_packet_count;
1512 buf->vb.size = size;
1513 buf->vb.field = field /*V4L2_FIELD_TOP*/;
1515 rc = videobuf_iolock(q, &buf->vb, NULL);
1516 if (0 != rc)
1517 goto fail;
1518 cx23885_risc_databuffer(dev->pci, &buf->risc,
1519 videobuf_to_dma(&buf->vb)->sglist,
1520 buf->vb.width, buf->vb.height);
1522 buf->vb.state = VIDEOBUF_PREPARED;
1523 return 0;
1525 fail:
1526 cx23885_free_buffer(q, buf);
1527 return rc;
1530 void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1532 struct cx23885_buffer *prev;
1533 struct cx23885_dev *dev = port->dev;
1534 struct cx23885_dmaqueue *cx88q = &port->mpegq;
1536 /* add jump to stopper */
1537 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
1538 buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
1539 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1541 if (list_empty(&cx88q->active)) {
1542 dprintk(1, "queue is empty - first active\n");
1543 list_add_tail(&buf->vb.queue, &cx88q->active);
1544 cx23885_start_dma(port, cx88q, buf);
1545 buf->vb.state = VIDEOBUF_ACTIVE;
1546 buf->count = cx88q->count++;
1547 mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
1548 dprintk(1, "[%p/%d] %s - first active\n",
1549 buf, buf->vb.i, __func__);
1550 } else {
1551 dprintk(1, "queue is not empty - append to active\n");
1552 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
1553 vb.queue);
1554 list_add_tail(&buf->vb.queue, &cx88q->active);
1555 buf->vb.state = VIDEOBUF_ACTIVE;
1556 buf->count = cx88q->count++;
1557 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1558 prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1559 dprintk(1, "[%p/%d] %s - append to active\n",
1560 buf, buf->vb.i, __func__);
1564 /* ----------------------------------------------------------- */
1566 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
1567 int restart)
1569 struct cx23885_dev *dev = port->dev;
1570 struct cx23885_dmaqueue *q = &port->mpegq;
1571 struct cx23885_buffer *buf;
1572 unsigned long flags;
1574 spin_lock_irqsave(&port->slock, flags);
1575 while (!list_empty(&q->active)) {
1576 buf = list_entry(q->active.next, struct cx23885_buffer,
1577 vb.queue);
1578 list_del(&buf->vb.queue);
1579 buf->vb.state = VIDEOBUF_ERROR;
1580 wake_up(&buf->vb.done);
1581 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1582 buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
1584 if (restart) {
1585 dprintk(1, "restarting queue\n");
1586 cx23885_restart_queue(port, q);
1588 spin_unlock_irqrestore(&port->slock, flags);
1591 void cx23885_cancel_buffers(struct cx23885_tsport *port)
1593 struct cx23885_dev *dev = port->dev;
1594 struct cx23885_dmaqueue *q = &port->mpegq;
1596 dprintk(1, "%s()\n", __func__);
1597 del_timer_sync(&q->timeout);
1598 cx23885_stop_dma(port);
1599 do_cancel_buffers(port, "cancel", 0);
1602 static void cx23885_timeout(unsigned long data)
1604 struct cx23885_tsport *port = (struct cx23885_tsport *)data;
1605 struct cx23885_dev *dev = port->dev;
1607 dprintk(1, "%s()\n", __func__);
1609 if (debug > 5)
1610 cx23885_sram_channel_dump(dev,
1611 &dev->sram_channels[port->sram_chno]);
1613 cx23885_stop_dma(port);
1614 do_cancel_buffers(port, "timeout", 1);
1617 int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1619 /* FIXME: port1 assumption here. */
1620 struct cx23885_tsport *port = &dev->ts1;
1621 int count = 0;
1622 int handled = 0;
1624 if (status == 0)
1625 return handled;
1627 count = cx_read(port->reg_gpcnt);
1628 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1629 status, cx_read(port->reg_ts_int_msk), count);
1631 if ((status & VID_B_MSK_BAD_PKT) ||
1632 (status & VID_B_MSK_OPC_ERR) ||
1633 (status & VID_B_MSK_VBI_OPC_ERR) ||
1634 (status & VID_B_MSK_SYNC) ||
1635 (status & VID_B_MSK_VBI_SYNC) ||
1636 (status & VID_B_MSK_OF) ||
1637 (status & VID_B_MSK_VBI_OF)) {
1638 printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
1639 "= 0x%x\n", dev->name, status);
1640 if (status & VID_B_MSK_BAD_PKT)
1641 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1642 if (status & VID_B_MSK_OPC_ERR)
1643 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1644 if (status & VID_B_MSK_VBI_OPC_ERR)
1645 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1646 if (status & VID_B_MSK_SYNC)
1647 dprintk(1, " VID_B_MSK_SYNC\n");
1648 if (status & VID_B_MSK_VBI_SYNC)
1649 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1650 if (status & VID_B_MSK_OF)
1651 dprintk(1, " VID_B_MSK_OF\n");
1652 if (status & VID_B_MSK_VBI_OF)
1653 dprintk(1, " VID_B_MSK_VBI_OF\n");
1655 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1656 cx23885_sram_channel_dump(dev,
1657 &dev->sram_channels[port->sram_chno]);
1658 cx23885_417_check_encoder(dev);
1659 } else if (status & VID_B_MSK_RISCI1) {
1660 dprintk(7, " VID_B_MSK_RISCI1\n");
1661 spin_lock(&port->slock);
1662 cx23885_wakeup(port, &port->mpegq, count);
1663 spin_unlock(&port->slock);
1664 } else if (status & VID_B_MSK_RISCI2) {
1665 dprintk(7, " VID_B_MSK_RISCI2\n");
1666 spin_lock(&port->slock);
1667 cx23885_restart_queue(port, &port->mpegq);
1668 spin_unlock(&port->slock);
1670 if (status) {
1671 cx_write(port->reg_ts_int_stat, status);
1672 handled = 1;
1675 return handled;
1678 static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1680 struct cx23885_dev *dev = port->dev;
1681 int handled = 0;
1682 u32 count;
1684 if ((status & VID_BC_MSK_OPC_ERR) ||
1685 (status & VID_BC_MSK_BAD_PKT) ||
1686 (status & VID_BC_MSK_SYNC) ||
1687 (status & VID_BC_MSK_OF)) {
1689 if (status & VID_BC_MSK_OPC_ERR)
1690 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1691 VID_BC_MSK_OPC_ERR);
1693 if (status & VID_BC_MSK_BAD_PKT)
1694 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1695 VID_BC_MSK_BAD_PKT);
1697 if (status & VID_BC_MSK_SYNC)
1698 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1699 VID_BC_MSK_SYNC);
1701 if (status & VID_BC_MSK_OF)
1702 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1703 VID_BC_MSK_OF);
1705 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
1707 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1708 cx23885_sram_channel_dump(dev,
1709 &dev->sram_channels[port->sram_chno]);
1711 } else if (status & VID_BC_MSK_RISCI1) {
1713 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1715 spin_lock(&port->slock);
1716 count = cx_read(port->reg_gpcnt);
1717 cx23885_wakeup(port, &port->mpegq, count);
1718 spin_unlock(&port->slock);
1720 } else if (status & VID_BC_MSK_RISCI2) {
1722 dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);
1724 spin_lock(&port->slock);
1725 cx23885_restart_queue(port, &port->mpegq);
1726 spin_unlock(&port->slock);
1729 if (status) {
1730 cx_write(port->reg_ts_int_stat, status);
1731 handled = 1;
1734 return handled;
1737 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1739 struct cx23885_dev *dev = dev_id;
1740 struct cx23885_tsport *ts1 = &dev->ts1;
1741 struct cx23885_tsport *ts2 = &dev->ts2;
1742 u32 pci_status, pci_mask;
1743 u32 vida_status, vida_mask;
1744 u32 ts1_status, ts1_mask;
1745 u32 ts2_status, ts2_mask;
1746 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1747 bool subdev_handled;
1749 pci_status = cx_read(PCI_INT_STAT);
1750 pci_mask = cx23885_irq_get_mask(dev);
1751 vida_status = cx_read(VID_A_INT_STAT);
1752 vida_mask = cx_read(VID_A_INT_MSK);
1753 ts1_status = cx_read(VID_B_INT_STAT);
1754 ts1_mask = cx_read(VID_B_INT_MSK);
1755 ts2_status = cx_read(VID_C_INT_STAT);
1756 ts2_mask = cx_read(VID_C_INT_MSK);
1758 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1759 goto out;
1761 vida_count = cx_read(VID_A_GPCNT);
1762 ts1_count = cx_read(ts1->reg_gpcnt);
1763 ts2_count = cx_read(ts2->reg_gpcnt);
1764 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1765 pci_status, pci_mask);
1766 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1767 vida_status, vida_mask, vida_count);
1768 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1769 ts1_status, ts1_mask, ts1_count);
1770 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1771 ts2_status, ts2_mask, ts2_count);
1773 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1774 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1775 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1776 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1777 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
1778 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1780 if (pci_status & PCI_MSK_RISC_RD)
1781 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1782 PCI_MSK_RISC_RD);
1784 if (pci_status & PCI_MSK_RISC_WR)
1785 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1786 PCI_MSK_RISC_WR);
1788 if (pci_status & PCI_MSK_AL_RD)
1789 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1790 PCI_MSK_AL_RD);
1792 if (pci_status & PCI_MSK_AL_WR)
1793 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1794 PCI_MSK_AL_WR);
1796 if (pci_status & PCI_MSK_APB_DMA)
1797 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1798 PCI_MSK_APB_DMA);
1800 if (pci_status & PCI_MSK_VID_C)
1801 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1802 PCI_MSK_VID_C);
1804 if (pci_status & PCI_MSK_VID_B)
1805 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1806 PCI_MSK_VID_B);
1808 if (pci_status & PCI_MSK_VID_A)
1809 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1810 PCI_MSK_VID_A);
1812 if (pci_status & PCI_MSK_AUD_INT)
1813 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1814 PCI_MSK_AUD_INT);
1816 if (pci_status & PCI_MSK_AUD_EXT)
1817 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1818 PCI_MSK_AUD_EXT);
1820 if (pci_status & PCI_MSK_GPIO0)
1821 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1822 PCI_MSK_GPIO0);
1824 if (pci_status & PCI_MSK_GPIO1)
1825 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1826 PCI_MSK_GPIO1);
1828 if (pci_status & PCI_MSK_AV_CORE)
1829 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1830 PCI_MSK_AV_CORE);
1832 if (pci_status & PCI_MSK_IR)
1833 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1834 PCI_MSK_IR);
1837 if (cx23885_boards[dev->board].ci_type == 1 &&
1838 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1839 handled += netup_ci_slot_status(dev, pci_status);
1841 if (cx23885_boards[dev->board].ci_type == 2 &&
1842 (pci_status & PCI_MSK_GPIO0))
1843 handled += altera_ci_irq(dev);
1845 if (ts1_status) {
1846 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1847 handled += cx23885_irq_ts(ts1, ts1_status);
1848 else
1849 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1850 handled += cx23885_irq_417(dev, ts1_status);
1853 if (ts2_status) {
1854 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1855 handled += cx23885_irq_ts(ts2, ts2_status);
1856 else
1857 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1858 handled += cx23885_irq_417(dev, ts2_status);
1861 if (vida_status)
1862 handled += cx23885_video_irq(dev, vida_status);
1864 if (pci_status & PCI_MSK_IR) {
1865 subdev_handled = false;
1866 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1867 pci_status, &subdev_handled);
1868 if (subdev_handled)
1869 handled++;
1872 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1873 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1874 if (!schedule_work(&dev->cx25840_work))
1875 printk(KERN_ERR "%s: failed to set up deferred work for"
1876 " AV Core/IR interrupt. Interrupt is disabled"
1877 " and won't be re-enabled\n", dev->name);
1878 handled++;
1881 if (handled)
1882 cx_write(PCI_INT_STAT, pci_status);
1883 out:
1884 return IRQ_RETVAL(handled);
1887 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1888 unsigned int notification, void *arg)
1890 struct cx23885_dev *dev;
1892 if (sd == NULL)
1893 return;
1895 dev = to_cx23885(sd->v4l2_dev);
1897 switch (notification) {
1898 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1899 if (sd == dev->sd_ir)
1900 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1901 break;
1902 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1903 if (sd == dev->sd_ir)
1904 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1905 break;
1909 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1911 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1912 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1913 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1914 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1917 static inline int encoder_on_portb(struct cx23885_dev *dev)
1919 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1922 static inline int encoder_on_portc(struct cx23885_dev *dev)
1924 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1927 /* The mask represents 32 different GPIOs; the GPIOs are split across multiple
1928  * registers depending on the board configuration (and whether the
1929  * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
1930  * be pushed into the correct hardware register, regardless of the
1931  * physical location. Certain registers are shared, so we sanity check
1932  * and report errors if we think we're tampering with a GPIO that might
1933  * be assigned to the encoder (and used for the host bus).
1935 * GPIO 2 thru 0 - On the cx23885 bridge
1936 * GPIO 18 thru 3 - On the cx23417 host bus interface
1937 * GPIO 23 thru 19 - On the cx25840 a/v core
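/* For example, cx23885_gpio_set(dev, 0x10) asserts GPIO 4: the bit falls in
 * the 0x0007fff8 range, so it is shifted right by 3 and lands on MC417_RWD
 * bit 1 of the cx23417 host bus interface. */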
1939 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1941 if (mask & 0x7)
1942 cx_set(GP0_IO, mask & 0x7);
1944 if (mask & 0x0007fff8) {
1945 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1946 printk(KERN_ERR
1947 "%s: Setting GPIO on encoder ports\n",
1948 dev->name);
1949 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1952 /* TODO: 23-19 */
1953 if (mask & 0x00f80000)
1954 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1957 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1959 if (mask & 0x00000007)
1960 cx_clear(GP0_IO, mask & 0x7);
1962 if (mask & 0x0007fff8) {
1963 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1964 printk(KERN_ERR
1965                 "%s: Clearing GPIO on encoder ports\n",
1966 dev->name);
1967 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1970 /* TODO: 23-19 */
1971 if (mask & 0x00f80000)
1972 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1975 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1977 if (mask & 0x00000007)
1978 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1980 if (mask & 0x0007fff8) {
1981 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1982 printk(KERN_ERR
1983                 "%s: Reading GPIO on encoder ports\n",
1984 dev->name);
1985 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1988 /* TODO: 23-19 */
1989 if (mask & 0x00f80000)
1990 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1992 return 0;
1995 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1997 if ((mask & 0x00000007) && asoutput)
1998 cx_set(GP0_IO, (mask & 0x7) << 16);
1999 else if ((mask & 0x00000007) && !asoutput)
2000 cx_clear(GP0_IO, (mask & 0x7) << 16);
2002 if (mask & 0x0007fff8) {
2003 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2004 printk(KERN_ERR
2005 "%s: Enabling GPIO on encoder ports\n",
2006 dev->name);
2009 /* MC417_OEN is active low for output, write 1 for an input */
2010 if ((mask & 0x0007fff8) && asoutput)
2011 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2013 else if ((mask & 0x0007fff8) && !asoutput)
2014 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2016 /* TODO: 23-19 */
2019 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
2020 const struct pci_device_id *pci_id)
2022 struct cx23885_dev *dev;
2023 int err;
2025 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2026 if (NULL == dev)
2027 return -ENOMEM;
2029 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2030 if (err < 0)
2031 goto fail_free;
2033 /* Prepare to handle notifications from subdevices */
2034 cx23885_v4l2_dev_notify_init(dev);
2036 /* pci init */
2037 dev->pci = pci_dev;
2038 if (pci_enable_device(pci_dev)) {
2039 err = -EIO;
2040 goto fail_unreg;
2043 if (cx23885_dev_setup(dev) < 0) {
2044 err = -EINVAL;
2045 goto fail_unreg;
2048 /* print pci info */
2049 dev->pci_rev = pci_dev->revision;
2050 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2051 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
2052 "latency: %d, mmio: 0x%llx\n", dev->name,
2053 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2054 dev->pci_lat,
2055 (unsigned long long)pci_resource_start(pci_dev, 0));
2057 pci_set_master(pci_dev);
2058 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
2059 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2060 err = -EIO;
2061 goto fail_irq;
2064 err = request_irq(pci_dev->irq, cx23885_irq,
2065 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
2066 if (err < 0) {
2067 printk(KERN_ERR "%s: can't get IRQ %d\n",
2068 dev->name, pci_dev->irq);
2069 goto fail_irq;
2072 switch (dev->board) {
2073 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2074 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2075 break;
2076 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2077 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2078 break;
2082 * The CX2388[58] IR controller can start firing interrupts when
2083 * enabled, so these have to take place after the cx23885_irq() handler
2084 * is hooked up by the call to request_irq() above.
2086 cx23885_ir_pci_int_enable(dev);
2087 cx23885_input_init(dev);
2089 return 0;
2091 fail_irq:
2092 cx23885_dev_unregister(dev);
2093 fail_unreg:
2094 v4l2_device_unregister(&dev->v4l2_dev);
2095 fail_free:
2096 kfree(dev);
2097 return err;
2100 static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
2102 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2103 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
2105 cx23885_input_fini(dev);
2106 cx23885_ir_fini(dev);
2108 cx23885_shutdown(dev);
2110 pci_disable_device(pci_dev);
2112 /* unregister stuff */
2113 free_irq(pci_dev->irq, dev);
2115 cx23885_dev_unregister(dev);
2116 v4l2_device_unregister(v4l2_dev);
2117 kfree(dev);
2120 static struct pci_device_id cx23885_pci_tbl[] = {
2122 /* CX23885 */
2123 .vendor = 0x14f1,
2124 .device = 0x8852,
2125 .subvendor = PCI_ANY_ID,
2126 .subdevice = PCI_ANY_ID,
2127 }, {
2128 /* CX23887 Rev 2 */
2129 .vendor = 0x14f1,
2130 .device = 0x8880,
2131 .subvendor = PCI_ANY_ID,
2132 .subdevice = PCI_ANY_ID,
2133 }, {
2134 /* --- end of list --- */
2137 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2139 static struct pci_driver cx23885_pci_driver = {
2140 .name = "cx23885",
2141 .id_table = cx23885_pci_tbl,
2142 .probe = cx23885_initdev,
2143 .remove = __devexit_p(cx23885_finidev),
2144 /* TODO */
2145 .suspend = NULL,
2146 .resume = NULL,
2149 static int __init cx23885_init(void)
2151 printk(KERN_INFO "cx23885 driver version %s loaded\n",
2152 CX23885_VERSION);
2153 return pci_register_driver(&cx23885_pci_driver);
2156 static void __exit cx23885_fini(void)
2158 pci_unregister_driver(&cx23885_pci_driver);
2161 module_init(cx23885_init);
2162 module_exit(cx23885_fini);