/*
 * Driver for the Conexant CX23885 PCIe bridge
 *
 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <linux/firmware.h>

#include "cx23885.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
#include "cx23885-av.h"
#include "cx23885-input.h"
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
	} while (0)
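/*
 * Example (illustrative): loading the module with debug=2 makes
 * dprintk(1, ...) and dprintk(2, ...) print with a KERN_DEBUG
 * "cx23885[n]:" prefix, while the dprintk(7, ...) tracing in the
 * interrupt paths stays silent.  The macro expects a local 'dev'
 * pointer to be in scope at the call site.
 */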
static unsigned int cx23885_devcount;

#define NO_SYNC_LINE (-1U)
/* FIXME, these allocations will change when
 * analog arrives.  To be reviewed.
 *
 * 1 line   = 16 bytes of CDT
 * cdt size = 16 * linesize
 *
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */
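/*
 * Worked example (illustrative): with the 16-bytes-per-line rule above,
 * a TS channel programmed for bpl = 188 * 4 = 752 bytes and an assumed
 * 4 KiB FIFO yields lines = 4096 / 752 = 5 cluster lines, i.e. a CDT of
 * 5 * 16 = 80 bytes for that channel.
 */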
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.fifo_start	= 0x5000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.fifo_start	= 0x6000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.fifo_start	= 0x7000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.fifo_start	= 0x3000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.fifo_start	= 0x5000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.fifo_start	= 0x6000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.fifo_start	= 0x7000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}

void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}

void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);

	return v;
}
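/*
 * Usage sketch (illustrative): a sub-driver that owns the VID_B port
 * typically calls cx23885_irq_add_enable(dev, 0x02) when it starts
 * streaming and cx23885_irq_remove(dev, 0x02) when it stops.
 * dev->pci_irqmask records which PCI_INT_MSK bits have been claimed, so
 * cx23885_irq_enable(dev, 0xffffffff) only re-arms bits previously added.
 */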
383 static int cx23885_risc_decode(u32 risc
)
385 static char *instr
[16] = {
386 [RISC_SYNC
>> 28] = "sync",
387 [RISC_WRITE
>> 28] = "write",
388 [RISC_WRITEC
>> 28] = "writec",
389 [RISC_READ
>> 28] = "read",
390 [RISC_READC
>> 28] = "readc",
391 [RISC_JUMP
>> 28] = "jump",
392 [RISC_SKIP
>> 28] = "skip",
393 [RISC_WRITERM
>> 28] = "writerm",
394 [RISC_WRITECM
>> 28] = "writecm",
395 [RISC_WRITECR
>> 28] = "writecr",
397 static int incr
[16] = {
398 [RISC_WRITE
>> 28] = 3,
399 [RISC_JUMP
>> 28] = 3,
400 [RISC_SKIP
>> 28] = 1,
401 [RISC_SYNC
>> 28] = 1,
402 [RISC_WRITERM
>> 28] = 3,
403 [RISC_WRITECM
>> 28] = 3,
404 [RISC_WRITECR
>> 28] = 4,
406 static char *bits
[] = {
407 "12", "13", "14", "resync",
408 "cnt0", "cnt1", "18", "19",
409 "20", "21", "22", "23",
410 "irq1", "irq2", "eol", "sol",
414 printk("0x%08x [ %s", risc
,
415 instr
[risc
>> 28] ? instr
[risc
>> 28] : "INVALID");
416 for (i
= ARRAY_SIZE(bits
) - 1; i
>= 0; i
--)
417 if (risc
& (1 << (i
+ 12)))
418 printk(" %s", bits
[i
]);
419 printk(" count=%d ]\n", risc
& 0xfff);
420 return incr
[risc
>> 28] ? incr
[risc
>> 28] : 1;
423 void cx23885_wakeup(struct cx23885_tsport
*port
,
424 struct cx23885_dmaqueue
*q
, u32 count
)
426 struct cx23885_dev
*dev
= port
->dev
;
427 struct cx23885_buffer
*buf
;
430 for (bc
= 0;; bc
++) {
431 if (list_empty(&q
->active
))
433 buf
= list_entry(q
->active
.next
,
434 struct cx23885_buffer
, vb
.queue
);
		/* count comes from the hw and is 16 bit wide --
		 * this trick handles wrap-arounds correctly for
		 * up to 32767 buffers in flight... */
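		/* Worked example (illustrative): if the hw counter has wrapped
		 * round to count = 0x0005 and this buffer was stamped with
		 * buf->count = 0xfffe, then (s16)(0x0005 - 0xfffe) = 7 >= 0,
		 * so the buffer is still correctly treated as completed. */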
439 if ((s16
) (count
- buf
->count
) < 0)
442 do_gettimeofday(&buf
->vb
.ts
);
443 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf
, buf
->vb
.i
,
445 buf
->vb
.state
= VIDEOBUF_DONE
;
446 list_del(&buf
->vb
.queue
);
447 wake_up(&buf
->vb
.done
);
449 if (list_empty(&q
->active
))
450 del_timer(&q
->timeout
);
452 mod_timer(&q
->timeout
, jiffies
+ BUFFER_TIMEOUT
);
454 printk(KERN_WARNING
"%s: %d buffers handled (should be 1)\n",
458 int cx23885_sram_channel_setup(struct cx23885_dev
*dev
,
459 struct sram_channel
*ch
,
460 unsigned int bpl
, u32 risc
)
462 unsigned int i
, lines
;
465 if (ch
->cmds_start
== 0) {
466 dprintk(1, "%s() Erasing channel [%s]\n", __func__
,
468 cx_write(ch
->ptr1_reg
, 0);
469 cx_write(ch
->ptr2_reg
, 0);
470 cx_write(ch
->cnt2_reg
, 0);
471 cx_write(ch
->cnt1_reg
, 0);
474 dprintk(1, "%s() Configuring channel [%s]\n", __func__
,
478 bpl
= (bpl
+ 7) & ~7; /* alignment */
480 lines
= ch
->fifo_size
/ bpl
;
485 cx_write(8 + 0, RISC_JUMP
| RISC_IRQ1
| RISC_CNT_INC
);
490 for (i
= 0; i
< lines
; i
++) {
491 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__
, cdt
+ 16*i
,
492 ch
->fifo_start
+ bpl
*i
);
493 cx_write(cdt
+ 16*i
, ch
->fifo_start
+ bpl
*i
);
494 cx_write(cdt
+ 16*i
+ 4, 0);
495 cx_write(cdt
+ 16*i
+ 8, 0);
496 cx_write(cdt
+ 16*i
+ 12, 0);
501 cx_write(ch
->cmds_start
+ 0, 8);
503 cx_write(ch
->cmds_start
+ 0, risc
);
504 cx_write(ch
->cmds_start
+ 4, 0); /* 64 bits 63-32 */
505 cx_write(ch
->cmds_start
+ 8, cdt
);
506 cx_write(ch
->cmds_start
+ 12, (lines
*16) >> 3);
507 cx_write(ch
->cmds_start
+ 16, ch
->ctrl_start
);
509 cx_write(ch
->cmds_start
+ 20, 0x80000000 | (64 >> 2));
511 cx_write(ch
->cmds_start
+ 20, 64 >> 2);
512 for (i
= 24; i
< 80; i
+= 4)
513 cx_write(ch
->cmds_start
+ i
, 0);
516 cx_write(ch
->ptr1_reg
, ch
->fifo_start
);
517 cx_write(ch
->ptr2_reg
, cdt
);
518 cx_write(ch
->cnt2_reg
, (lines
*16) >> 3);
519 cx_write(ch
->cnt1_reg
, (bpl
>> 3) - 1);
521 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
530 void cx23885_sram_channel_dump(struct cx23885_dev
*dev
,
531 struct sram_channel
*ch
)
533 static char *name
[] = {
550 unsigned int i
, j
, n
;
552 printk(KERN_WARNING
"%s: %s - dma channel status dump\n",
553 dev
->name
, ch
->name
);
554 for (i
= 0; i
< ARRAY_SIZE(name
); i
++)
555 printk(KERN_WARNING
"%s: cmds: %-15s: 0x%08x\n",
557 cx_read(ch
->cmds_start
+ 4*i
));
559 for (i
= 0; i
< 4; i
++) {
560 risc
= cx_read(ch
->cmds_start
+ 4 * (i
+ 14));
561 printk(KERN_WARNING
"%s: risc%d: ", dev
->name
, i
);
562 cx23885_risc_decode(risc
);
564 for (i
= 0; i
< (64 >> 2); i
+= n
) {
565 risc
= cx_read(ch
->ctrl_start
+ 4 * i
);
566 /* No consideration for bits 63-32 */
568 printk(KERN_WARNING
"%s: (0x%08x) iq %x: ", dev
->name
,
569 ch
->ctrl_start
+ 4 * i
, i
);
570 n
= cx23885_risc_decode(risc
);
571 for (j
= 1; j
< n
; j
++) {
572 risc
= cx_read(ch
->ctrl_start
+ 4 * (i
+ j
));
573 printk(KERN_WARNING
"%s: iq %x: 0x%08x [ arg #%d ]\n",
574 dev
->name
, i
+j
, risc
, j
);
578 printk(KERN_WARNING
"%s: fifo: 0x%08x -> 0x%x\n",
579 dev
->name
, ch
->fifo_start
, ch
->fifo_start
+ch
->fifo_size
);
580 printk(KERN_WARNING
"%s: ctrl: 0x%08x -> 0x%x\n",
581 dev
->name
, ch
->ctrl_start
, ch
->ctrl_start
+ 6*16);
582 printk(KERN_WARNING
"%s: ptr1_reg: 0x%08x\n",
583 dev
->name
, cx_read(ch
->ptr1_reg
));
584 printk(KERN_WARNING
"%s: ptr2_reg: 0x%08x\n",
585 dev
->name
, cx_read(ch
->ptr2_reg
));
586 printk(KERN_WARNING
"%s: cnt1_reg: 0x%08x\n",
587 dev
->name
, cx_read(ch
->cnt1_reg
));
588 printk(KERN_WARNING
"%s: cnt2_reg: 0x%08x\n",
589 dev
->name
, cx_read(ch
->cnt2_reg
));
592 static void cx23885_risc_disasm(struct cx23885_tsport
*port
,
593 struct btcx_riscmem
*risc
)
595 struct cx23885_dev
*dev
= port
->dev
;
596 unsigned int i
, j
, n
;
598 printk(KERN_INFO
"%s: risc disasm: %p [dma=0x%08lx]\n",
599 dev
->name
, risc
->cpu
, (unsigned long)risc
->dma
);
600 for (i
= 0; i
< (risc
->size
>> 2); i
+= n
) {
601 printk(KERN_INFO
"%s: %04d: ", dev
->name
, i
);
602 n
= cx23885_risc_decode(le32_to_cpu(risc
->cpu
[i
]));
603 for (j
= 1; j
< n
; j
++)
604 printk(KERN_INFO
"%s: %04d: 0x%08x [ arg #%d ]\n",
605 dev
->name
, i
+ j
, risc
->cpu
[i
+ j
], j
);
606 if (risc
->cpu
[i
] == cpu_to_le32(RISC_JUMP
))
611 static void cx23885_shutdown(struct cx23885_dev
*dev
)
613 /* disable RISC controller */
614 cx_write(DEV_CNTRL2
, 0);
616 /* Disable all IR activity */
617 cx_write(IR_CNTRL_REG
, 0);
619 /* Disable Video A/B activity */
620 cx_write(VID_A_DMA_CTL
, 0);
621 cx_write(VID_B_DMA_CTL
, 0);
622 cx_write(VID_C_DMA_CTL
, 0);
624 /* Disable Audio activity */
625 cx_write(AUD_INT_DMA_CTL
, 0);
626 cx_write(AUD_EXT_DMA_CTL
, 0);
628 /* Disable Serial port */
629 cx_write(UART_CTL
, 0);
631 /* Disable Interrupts */
632 cx23885_irq_disable_all(dev
);
633 cx_write(VID_A_INT_MSK
, 0);
634 cx_write(VID_B_INT_MSK
, 0);
635 cx_write(VID_C_INT_MSK
, 0);
636 cx_write(AUDIO_INT_INT_MSK
, 0);
637 cx_write(AUDIO_EXT_INT_MSK
, 0);
641 static void cx23885_reset(struct cx23885_dev
*dev
)
643 dprintk(1, "%s()\n", __func__
);
645 cx23885_shutdown(dev
);
647 cx_write(PCI_INT_STAT
, 0xffffffff);
648 cx_write(VID_A_INT_STAT
, 0xffffffff);
649 cx_write(VID_B_INT_STAT
, 0xffffffff);
650 cx_write(VID_C_INT_STAT
, 0xffffffff);
651 cx_write(AUDIO_INT_INT_STAT
, 0xffffffff);
652 cx_write(AUDIO_EXT_INT_STAT
, 0xffffffff);
653 cx_write(CLK_DELAY
, cx_read(CLK_DELAY
) & 0x80000000);
654 cx_write(PAD_CTRL
, 0x00500300);
658 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH01
],
660 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH02
], 128, 0);
661 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH03
],
663 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH04
], 128, 0);
664 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH05
], 128, 0);
665 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH06
],
667 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH07
], 128, 0);
668 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH08
], 128, 0);
669 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH09
], 128, 0);
671 cx23885_gpio_setup(dev
);
675 static int cx23885_pci_quirks(struct cx23885_dev
*dev
)
677 dprintk(1, "%s()\n", __func__
);
679 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
680 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
681 * occur on the cx23887 bridge.
683 if (dev
->bridge
== CX23885_BRIDGE_885
)
684 cx_clear(RDR_TLCTL0
, 1 << 4);
689 static int get_resources(struct cx23885_dev
*dev
)
691 if (request_mem_region(pci_resource_start(dev
->pci
, 0),
692 pci_resource_len(dev
->pci
, 0),
696 printk(KERN_ERR
"%s: can't get MMIO memory @ 0x%llx\n",
697 dev
->name
, (unsigned long long)pci_resource_start(dev
->pci
, 0));
702 static void cx23885_timeout(unsigned long data
);
703 int cx23885_risc_stopper(struct pci_dev
*pci
, struct btcx_riscmem
*risc
,
704 u32 reg
, u32 mask
, u32 value
);
706 static int cx23885_init_tsport(struct cx23885_dev
*dev
,
707 struct cx23885_tsport
*port
, int portno
)
709 dprintk(1, "%s(portno=%d)\n", __func__
, portno
);
	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val     = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val  = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val    = 0x0;
	port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
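	/* The value above packs the MPEG-2 TS sync byte (0x47) and the
	 * 188-byte TS packet length as (sync << 16 | length << 4), which is
	 * presumably how the hardware start-of-packet detector expects to be
	 * programmed; the exact bit layout is inferred, not documented here. */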
717 spin_lock_init(&port
->slock
);
721 INIT_LIST_HEAD(&port
->mpegq
.active
);
722 INIT_LIST_HEAD(&port
->mpegq
.queued
);
723 port
->mpegq
.timeout
.function
= cx23885_timeout
;
724 port
->mpegq
.timeout
.data
= (unsigned long)port
;
725 init_timer(&port
->mpegq
.timeout
);
727 mutex_init(&port
->frontends
.lock
);
728 INIT_LIST_HEAD(&port
->frontends
.felist
);
729 port
->frontends
.active_fe_id
= 0;
	/* This should be hardcoded to allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
735 if (!port
->num_frontends
)
736 port
->num_frontends
= 1;
740 port
->reg_gpcnt
= VID_B_GPCNT
;
741 port
->reg_gpcnt_ctl
= VID_B_GPCNT_CTL
;
742 port
->reg_dma_ctl
= VID_B_DMA_CTL
;
743 port
->reg_lngth
= VID_B_LNGTH
;
744 port
->reg_hw_sop_ctrl
= VID_B_HW_SOP_CTL
;
745 port
->reg_gen_ctrl
= VID_B_GEN_CTL
;
746 port
->reg_bd_pkt_status
= VID_B_BD_PKT_STATUS
;
747 port
->reg_sop_status
= VID_B_SOP_STATUS
;
748 port
->reg_fifo_ovfl_stat
= VID_B_FIFO_OVFL_STAT
;
749 port
->reg_vld_misc
= VID_B_VLD_MISC
;
750 port
->reg_ts_clk_en
= VID_B_TS_CLK_EN
;
751 port
->reg_src_sel
= VID_B_SRC_SEL
;
752 port
->reg_ts_int_msk
= VID_B_INT_MSK
;
753 port
->reg_ts_int_stat
= VID_B_INT_STAT
;
754 port
->sram_chno
= SRAM_CH03
; /* VID_B */
755 port
->pci_irqmask
= 0x02; /* VID_B bit1 */
758 port
->reg_gpcnt
= VID_C_GPCNT
;
759 port
->reg_gpcnt_ctl
= VID_C_GPCNT_CTL
;
760 port
->reg_dma_ctl
= VID_C_DMA_CTL
;
761 port
->reg_lngth
= VID_C_LNGTH
;
762 port
->reg_hw_sop_ctrl
= VID_C_HW_SOP_CTL
;
763 port
->reg_gen_ctrl
= VID_C_GEN_CTL
;
764 port
->reg_bd_pkt_status
= VID_C_BD_PKT_STATUS
;
765 port
->reg_sop_status
= VID_C_SOP_STATUS
;
766 port
->reg_fifo_ovfl_stat
= VID_C_FIFO_OVFL_STAT
;
767 port
->reg_vld_misc
= VID_C_VLD_MISC
;
768 port
->reg_ts_clk_en
= VID_C_TS_CLK_EN
;
769 port
->reg_src_sel
= 0;
770 port
->reg_ts_int_msk
= VID_C_INT_MSK
;
771 port
->reg_ts_int_stat
= VID_C_INT_STAT
;
772 port
->sram_chno
= SRAM_CH06
; /* VID_C */
773 port
->pci_irqmask
= 0x04; /* VID_C bit2 */
779 cx23885_risc_stopper(dev
->pci
, &port
->mpegq
.stopper
,
780 port
->reg_dma_ctl
, port
->dma_ctl_val
, 0x00);
785 static void cx23885_dev_checkrevision(struct cx23885_dev
*dev
)
787 switch (cx_read(RDR_CFG2
) & 0xff) {
790 dev
->hwrevision
= 0xa0;
794 dev
->hwrevision
= 0xa1;
797 /* CX23885-13Z/14Z */
798 dev
->hwrevision
= 0xb0;
801 if (dev
->pci
->device
== 0x8880) {
802 /* CX23888-21Z/22Z */
803 dev
->hwrevision
= 0xc0;
806 dev
->hwrevision
= 0xa4;
810 if (dev
->pci
->device
== 0x8880) {
812 dev
->hwrevision
= 0xd0;
814 /* CX23885-15Z, CX23888-31Z */
815 dev
->hwrevision
= 0xa5;
820 dev
->hwrevision
= 0xc0;
824 dev
->hwrevision
= 0xb1;
827 printk(KERN_ERR
"%s() New hardware revision found 0x%x\n",
828 __func__
, dev
->hwrevision
);
831 printk(KERN_INFO
"%s() Hardware revision = 0x%02x\n",
832 __func__
, dev
->hwrevision
);
834 printk(KERN_ERR
"%s() Hardware revision unknown 0x%x\n",
835 __func__
, dev
->hwrevision
);
/* Find the first v4l2_subdev member matching the group id in hw */
839 struct v4l2_subdev
*cx23885_find_hw(struct cx23885_dev
*dev
, u32 hw
)
841 struct v4l2_subdev
*result
= NULL
;
842 struct v4l2_subdev
*sd
;
844 spin_lock(&dev
->v4l2_dev
.lock
);
845 v4l2_device_for_each_subdev(sd
, &dev
->v4l2_dev
) {
846 if (sd
->grp_id
== hw
) {
851 spin_unlock(&dev
->v4l2_dev
.lock
);
855 static int cx23885_dev_setup(struct cx23885_dev
*dev
)
859 spin_lock_init(&dev
->pci_irqmask_lock
);
861 mutex_init(&dev
->lock
);
862 mutex_init(&dev
->gpio_lock
);
864 atomic_inc(&dev
->refcount
);
866 dev
->nr
= cx23885_devcount
++;
867 sprintf(dev
->name
, "cx23885[%d]", dev
->nr
);
869 /* Configure the internal memory */
870 if (dev
->pci
->device
== 0x8880) {
871 /* Could be 887 or 888, assume a default */
872 dev
->bridge
= CX23885_BRIDGE_887
;
873 /* Apply a sensible clock frequency for the PCIe bridge */
874 dev
->clk_freq
= 25000000;
875 dev
->sram_channels
= cx23887_sram_channels
;
877 if (dev
->pci
->device
== 0x8852) {
878 dev
->bridge
= CX23885_BRIDGE_885
;
879 /* Apply a sensible clock frequency for the PCIe bridge */
880 dev
->clk_freq
= 28000000;
881 dev
->sram_channels
= cx23885_sram_channels
;
885 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
886 __func__
, dev
->bridge
);
890 if (card
[dev
->nr
] < cx23885_bcount
)
891 dev
->board
= card
[dev
->nr
];
892 for (i
= 0; UNSET
== dev
->board
&& i
< cx23885_idcount
; i
++)
893 if (dev
->pci
->subsystem_vendor
== cx23885_subids
[i
].subvendor
&&
894 dev
->pci
->subsystem_device
== cx23885_subids
[i
].subdevice
)
895 dev
->board
= cx23885_subids
[i
].card
;
896 if (UNSET
== dev
->board
) {
897 dev
->board
= CX23885_BOARD_UNKNOWN
;
898 cx23885_card_list(dev
);
	/* If the user specified a clk freq override, apply it */
902 if (cx23885_boards
[dev
->board
].clk_freq
> 0)
903 dev
->clk_freq
= cx23885_boards
[dev
->board
].clk_freq
;
905 dev
->pci_bus
= dev
->pci
->bus
->number
;
906 dev
->pci_slot
= PCI_SLOT(dev
->pci
->devfn
);
907 cx23885_irq_add(dev
, 0x001f00);
909 /* External Master 1 Bus */
910 dev
->i2c_bus
[0].nr
= 0;
911 dev
->i2c_bus
[0].dev
= dev
;
912 dev
->i2c_bus
[0].reg_stat
= I2C1_STAT
;
913 dev
->i2c_bus
[0].reg_ctrl
= I2C1_CTRL
;
914 dev
->i2c_bus
[0].reg_addr
= I2C1_ADDR
;
915 dev
->i2c_bus
[0].reg_rdata
= I2C1_RDATA
;
916 dev
->i2c_bus
[0].reg_wdata
= I2C1_WDATA
;
917 dev
->i2c_bus
[0].i2c_period
= (0x9d << 24); /* 100kHz */
919 /* External Master 2 Bus */
920 dev
->i2c_bus
[1].nr
= 1;
921 dev
->i2c_bus
[1].dev
= dev
;
922 dev
->i2c_bus
[1].reg_stat
= I2C2_STAT
;
923 dev
->i2c_bus
[1].reg_ctrl
= I2C2_CTRL
;
924 dev
->i2c_bus
[1].reg_addr
= I2C2_ADDR
;
925 dev
->i2c_bus
[1].reg_rdata
= I2C2_RDATA
;
926 dev
->i2c_bus
[1].reg_wdata
= I2C2_WDATA
;
927 dev
->i2c_bus
[1].i2c_period
= (0x9d << 24); /* 100kHz */
929 /* Internal Master 3 Bus */
930 dev
->i2c_bus
[2].nr
= 2;
931 dev
->i2c_bus
[2].dev
= dev
;
932 dev
->i2c_bus
[2].reg_stat
= I2C3_STAT
;
933 dev
->i2c_bus
[2].reg_ctrl
= I2C3_CTRL
;
934 dev
->i2c_bus
[2].reg_addr
= I2C3_ADDR
;
935 dev
->i2c_bus
[2].reg_rdata
= I2C3_RDATA
;
936 dev
->i2c_bus
[2].reg_wdata
= I2C3_WDATA
;
937 dev
->i2c_bus
[2].i2c_period
= (0x07 << 24); /* 1.95MHz */
939 if ((cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
) ||
940 (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
))
941 cx23885_init_tsport(dev
, &dev
->ts1
, 1);
943 if ((cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
) ||
944 (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
))
945 cx23885_init_tsport(dev
, &dev
->ts2
, 2);
947 if (get_resources(dev
) < 0) {
948 printk(KERN_ERR
"CORE %s No more PCIe resources for "
949 "subsystem: %04x:%04x\n",
950 dev
->name
, dev
->pci
->subsystem_vendor
,
951 dev
->pci
->subsystem_device
);
958 dev
->lmmio
= ioremap(pci_resource_start(dev
->pci
, 0),
959 pci_resource_len(dev
->pci
, 0));
961 dev
->bmmio
= (u8 __iomem
*)dev
->lmmio
;
963 printk(KERN_INFO
"CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
964 dev
->name
, dev
->pci
->subsystem_vendor
,
965 dev
->pci
->subsystem_device
, cx23885_boards
[dev
->board
].name
,
966 dev
->board
, card
[dev
->nr
] == dev
->board
?
967 "insmod option" : "autodetected");
969 cx23885_pci_quirks(dev
);
971 /* Assume some sensible defaults */
972 dev
->tuner_type
= cx23885_boards
[dev
->board
].tuner_type
;
973 dev
->tuner_addr
= cx23885_boards
[dev
->board
].tuner_addr
;
974 dev
->tuner_bus
= cx23885_boards
[dev
->board
].tuner_bus
;
975 dev
->radio_type
= cx23885_boards
[dev
->board
].radio_type
;
976 dev
->radio_addr
= cx23885_boards
[dev
->board
].radio_addr
;
978 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
979 __func__
, dev
->tuner_type
, dev
->tuner_addr
, dev
->tuner_bus
);
980 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
981 __func__
, dev
->radio_type
, dev
->radio_addr
);
	/* The cx23417 encoder has GPIOs that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
987 if ((cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
) ||
988 (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
))
989 cx23885_mc417_init(dev
);
994 cx23885_i2c_register(&dev
->i2c_bus
[0]);
995 cx23885_i2c_register(&dev
->i2c_bus
[1]);
996 cx23885_i2c_register(&dev
->i2c_bus
[2]);
997 cx23885_card_setup(dev
);
998 call_all(dev
, core
, s_power
, 0);
999 cx23885_ir_init(dev
);
1001 if (cx23885_boards
[dev
->board
].porta
== CX23885_ANALOG_VIDEO
) {
1002 if (cx23885_video_register(dev
) < 0) {
1003 printk(KERN_ERR
"%s() Failed to register analog "
1004 "video adapters on VID_A\n", __func__
);
1008 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
) {
1009 if (cx23885_boards
[dev
->board
].num_fds_portb
)
1010 dev
->ts1
.num_frontends
=
1011 cx23885_boards
[dev
->board
].num_fds_portb
;
1012 if (cx23885_dvb_register(&dev
->ts1
) < 0) {
1013 printk(KERN_ERR
"%s() Failed to register dvb adapters on VID_B\n",
1017 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
) {
1018 if (cx23885_417_register(dev
) < 0) {
1020 "%s() Failed to register 417 on VID_B\n",
1025 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
) {
1026 if (cx23885_boards
[dev
->board
].num_fds_portc
)
1027 dev
->ts2
.num_frontends
=
1028 cx23885_boards
[dev
->board
].num_fds_portc
;
1029 if (cx23885_dvb_register(&dev
->ts2
) < 0) {
1031 "%s() Failed to register dvb on VID_C\n",
1035 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
) {
1036 if (cx23885_417_register(dev
) < 0) {
1038 "%s() Failed to register 417 on VID_C\n",
1043 cx23885_dev_checkrevision(dev
);
1045 /* disable MSI for NetUP cards, otherwise CI is not working */
1046 if (cx23885_boards
[dev
->board
].ci_type
> 0)
1047 cx_clear(RDR_RDRCTL1
, 1 << 8);
1052 static void cx23885_dev_unregister(struct cx23885_dev
*dev
)
1054 release_mem_region(pci_resource_start(dev
->pci
, 0),
1055 pci_resource_len(dev
->pci
, 0));
1057 if (!atomic_dec_and_test(&dev
->refcount
))
1060 if (cx23885_boards
[dev
->board
].porta
== CX23885_ANALOG_VIDEO
)
1061 cx23885_video_unregister(dev
);
1063 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
)
1064 cx23885_dvb_unregister(&dev
->ts1
);
1066 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1067 cx23885_417_unregister(dev
);
1069 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
)
1070 cx23885_dvb_unregister(&dev
->ts2
);
1072 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
)
1073 cx23885_417_unregister(dev
);
1075 cx23885_i2c_unregister(&dev
->i2c_bus
[2]);
1076 cx23885_i2c_unregister(&dev
->i2c_bus
[1]);
1077 cx23885_i2c_unregister(&dev
->i2c_bus
[0]);
1079 iounmap(dev
->lmmio
);
1082 static __le32
*cx23885_risc_field(__le32
*rp
, struct scatterlist
*sglist
,
1083 unsigned int offset
, u32 sync_line
,
1084 unsigned int bpl
, unsigned int padding
,
1085 unsigned int lines
, unsigned int lpi
)
1087 struct scatterlist
*sg
;
1088 unsigned int line
, todo
, sol
;
1090 /* sync instruction */
1091 if (sync_line
!= NO_SYNC_LINE
)
1092 *(rp
++) = cpu_to_le32(RISC_RESYNC
| sync_line
);
1096 for (line
= 0; line
< lines
; line
++) {
1097 while (offset
&& offset
>= sg_dma_len(sg
)) {
1098 offset
-= sg_dma_len(sg
);
1102 if (lpi
&& line
> 0 && !(line
% lpi
))
1103 sol
= RISC_SOL
| RISC_IRQ1
| RISC_CNT_INC
;
1107 if (bpl
<= sg_dma_len(sg
)-offset
) {
1108 /* fits into current chunk */
1109 *(rp
++) = cpu_to_le32(RISC_WRITE
|sol
|RISC_EOL
|bpl
);
1110 *(rp
++) = cpu_to_le32(sg_dma_address(sg
)+offset
);
1111 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
1114 /* scanline needs to be split */
1116 *(rp
++) = cpu_to_le32(RISC_WRITE
|sol
|
1117 (sg_dma_len(sg
)-offset
));
1118 *(rp
++) = cpu_to_le32(sg_dma_address(sg
)+offset
);
1119 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
1120 todo
-= (sg_dma_len(sg
)-offset
);
1123 while (todo
> sg_dma_len(sg
)) {
1124 *(rp
++) = cpu_to_le32(RISC_WRITE
|
1126 *(rp
++) = cpu_to_le32(sg_dma_address(sg
));
1127 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
1128 todo
-= sg_dma_len(sg
);
1131 *(rp
++) = cpu_to_le32(RISC_WRITE
|RISC_EOL
|todo
);
1132 *(rp
++) = cpu_to_le32(sg_dma_address(sg
));
1133 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
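/*
 * Each write emitted above is three dwords: an opcode word (RISC_WRITE
 * plus optional SOL/EOL flags and a 12-bit byte count), the low 32 bits
 * of the DMA address, and a zero for address bits 63-32.  A scanline that
 * straddles a scatterlist chunk is split into several writes, with SOL
 * only on the first and EOL only on the last.
 */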
1142 int cx23885_risc_buffer(struct pci_dev
*pci
, struct btcx_riscmem
*risc
,
1143 struct scatterlist
*sglist
, unsigned int top_offset
,
1144 unsigned int bottom_offset
, unsigned int bpl
,
1145 unsigned int padding
, unsigned int lines
)
1147 u32 instructions
, fields
;
1152 if (UNSET
!= top_offset
)
1154 if (UNSET
!= bottom_offset
)
	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
1162 instructions
= fields
* (1 + ((bpl
+ padding
) * lines
)
1163 / PAGE_SIZE
+ lines
);
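	/*
	 * Worked example (illustrative, assuming 4 KiB pages): one field of
	 * 288 lines at bpl = 1440 with no padding gives
	 * 1 + (1440 * 288) / 4096 + 288 = 390 instructions per field, each
	 * budgeted at 12 bytes (3 dwords) in the allocation below.
	 */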
1165 rc
= btcx_riscmem_alloc(pci
, risc
, instructions
*12);
1169 /* write risc instructions */
1171 if (UNSET
!= top_offset
)
1172 rp
= cx23885_risc_field(rp
, sglist
, top_offset
, 0,
1173 bpl
, padding
, lines
, 0);
1174 if (UNSET
!= bottom_offset
)
1175 rp
= cx23885_risc_field(rp
, sglist
, bottom_offset
, 0x200,
1176 bpl
, padding
, lines
, 0);
1178 /* save pointer to jmp instruction address */
1180 BUG_ON((risc
->jmp
- risc
->cpu
+ 2) * sizeof(*risc
->cpu
) > risc
->size
);
1184 int cx23885_risc_databuffer(struct pci_dev
*pci
,
1185 struct btcx_riscmem
*risc
,
1186 struct scatterlist
*sglist
,
1188 unsigned int lines
, unsigned int lpi
)
	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Here
	   there is no padding and no sync.  First DMA region may be smaller
	   than PAGE_SIZE */
1198 /* Jump and write need an extra dword */
1199 instructions
= 1 + (bpl
* lines
) / PAGE_SIZE
+ lines
;
1202 rc
= btcx_riscmem_alloc(pci
, risc
, instructions
*12);
1206 /* write risc instructions */
1208 rp
= cx23885_risc_field(rp
, sglist
, 0, NO_SYNC_LINE
,
1209 bpl
, 0, lines
, lpi
);
1211 /* save pointer to jmp instruction address */
1213 BUG_ON((risc
->jmp
- risc
->cpu
+ 2) * sizeof(*risc
->cpu
) > risc
->size
);
1217 int cx23885_risc_vbibuffer(struct pci_dev
*pci
, struct btcx_riscmem
*risc
,
1218 struct scatterlist
*sglist
, unsigned int top_offset
,
1219 unsigned int bottom_offset
, unsigned int bpl
,
1220 unsigned int padding
, unsigned int lines
)
1222 u32 instructions
, fields
;
1227 if (UNSET
!= top_offset
)
1229 if (UNSET
!= bottom_offset
)
	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
1237 instructions
= fields
* (1 + ((bpl
+ padding
) * lines
)
1238 / PAGE_SIZE
+ lines
);
1240 rc
= btcx_riscmem_alloc(pci
, risc
, instructions
*12);
1243 /* write risc instructions */
1246 /* Sync to line 6, so US CC line 21 will appear in line '12'
1247 * in the userland vbi payload */
1248 if (UNSET
!= top_offset
)
1249 rp
= cx23885_risc_field(rp
, sglist
, top_offset
, 6,
1250 bpl
, padding
, lines
, 0);
1252 if (UNSET
!= bottom_offset
)
1253 rp
= cx23885_risc_field(rp
, sglist
, bottom_offset
, 0x207,
1254 bpl
, padding
, lines
, 0);
1258 /* save pointer to jmp instruction address */
1260 BUG_ON((risc
->jmp
- risc
->cpu
+ 2) * sizeof(*risc
->cpu
) > risc
->size
);
1265 int cx23885_risc_stopper(struct pci_dev
*pci
, struct btcx_riscmem
*risc
,
1266 u32 reg
, u32 mask
, u32 value
)
1271 rc
= btcx_riscmem_alloc(pci
, risc
, 4*16);
1275 /* write risc instructions */
1277 *(rp
++) = cpu_to_le32(RISC_WRITECR
| RISC_IRQ2
);
1278 *(rp
++) = cpu_to_le32(reg
);
1279 *(rp
++) = cpu_to_le32(value
);
1280 *(rp
++) = cpu_to_le32(mask
);
1281 *(rp
++) = cpu_to_le32(RISC_JUMP
);
1282 *(rp
++) = cpu_to_le32(risc
->dma
);
1283 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
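/*
 * The stopper program built above is a single WRITECR (register, value,
 * mask) that raises IRQ2, followed by a JUMP back to its own DMA address:
 * once a buffer chain hands control to it, the controller writes 'value'
 * under 'mask' into 'reg' (in practice clearing the port's dma_ctl bits)
 * and then spins harmlessly until software reprograms the queue.
 */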
1287 void cx23885_free_buffer(struct videobuf_queue
*q
, struct cx23885_buffer
*buf
)
1289 struct videobuf_dmabuf
*dma
= videobuf_to_dma(&buf
->vb
);
1291 BUG_ON(in_interrupt());
1292 videobuf_waiton(q
, &buf
->vb
, 0, 0);
1293 videobuf_dma_unmap(q
->dev
, dma
);
1294 videobuf_dma_free(dma
);
1295 btcx_riscmem_free(to_pci_dev(q
->dev
), &buf
->risc
);
1296 buf
->vb
.state
= VIDEOBUF_NEEDS_INIT
;
1299 static void cx23885_tsport_reg_dump(struct cx23885_tsport
*port
)
1301 struct cx23885_dev
*dev
= port
->dev
;
1303 dprintk(1, "%s() Register Dump\n", __func__
);
1304 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__
,
1305 cx_read(DEV_CNTRL2
));
1306 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__
,
1307 cx23885_irq_get_mask(dev
));
1308 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__
,
1309 cx_read(AUDIO_INT_INT_MSK
));
1310 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__
,
1311 cx_read(AUD_INT_DMA_CTL
));
1312 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__
,
1313 cx_read(AUDIO_EXT_INT_MSK
));
1314 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__
,
1315 cx_read(AUD_EXT_DMA_CTL
));
1316 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__
,
1318 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__
,
1319 cx_read(ALT_PIN_OUT_SEL
));
1320 dprintk(1, "%s() GPIO2 0x%08X\n", __func__
,
1322 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__
,
1323 port
->reg_gpcnt
, cx_read(port
->reg_gpcnt
));
1324 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__
,
1325 port
->reg_gpcnt_ctl
, cx_read(port
->reg_gpcnt_ctl
));
1326 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__
,
1327 port
->reg_dma_ctl
, cx_read(port
->reg_dma_ctl
));
1328 if (port
->reg_src_sel
)
1329 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__
,
1330 port
->reg_src_sel
, cx_read(port
->reg_src_sel
));
1331 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__
,
1332 port
->reg_lngth
, cx_read(port
->reg_lngth
));
1333 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__
,
1334 port
->reg_hw_sop_ctrl
, cx_read(port
->reg_hw_sop_ctrl
));
1335 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__
,
1336 port
->reg_gen_ctrl
, cx_read(port
->reg_gen_ctrl
));
1337 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__
,
1338 port
->reg_bd_pkt_status
, cx_read(port
->reg_bd_pkt_status
));
1339 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__
,
1340 port
->reg_sop_status
, cx_read(port
->reg_sop_status
));
1341 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__
,
1342 port
->reg_fifo_ovfl_stat
, cx_read(port
->reg_fifo_ovfl_stat
));
1343 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__
,
1344 port
->reg_vld_misc
, cx_read(port
->reg_vld_misc
));
1345 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__
,
1346 port
->reg_ts_clk_en
, cx_read(port
->reg_ts_clk_en
));
1347 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__
,
1348 port
->reg_ts_int_msk
, cx_read(port
->reg_ts_int_msk
));
1351 static int cx23885_start_dma(struct cx23885_tsport
*port
,
1352 struct cx23885_dmaqueue
*q
,
1353 struct cx23885_buffer
*buf
)
1355 struct cx23885_dev
*dev
= port
->dev
;
1358 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__
,
1359 buf
->vb
.width
, buf
->vb
.height
, buf
->vb
.field
);
1361 /* Stop the fifo and risc engine for this port */
1362 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1364 /* setup fifo + format */
1365 cx23885_sram_channel_setup(dev
,
1366 &dev
->sram_channels
[port
->sram_chno
],
1367 port
->ts_packet_size
, buf
->risc
.dma
);
1369 cx23885_sram_channel_dump(dev
,
1370 &dev
->sram_channels
[port
->sram_chno
]);
1371 cx23885_risc_disasm(port
, &buf
->risc
);
1374 /* write TS length to chip */
1375 cx_write(port
->reg_lngth
, buf
->vb
.width
);
1377 if ((!(cx23885_boards
[dev
->board
].portb
& CX23885_MPEG_DVB
)) &&
1378 (!(cx23885_boards
[dev
->board
].portc
& CX23885_MPEG_DVB
))) {
1379 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1381 cx23885_boards
[dev
->board
].portb
,
1382 cx23885_boards
[dev
->board
].portc
);
1386 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1387 cx23885_av_clk(dev
, 0);
1391 /* If the port supports SRC SELECT, configure it */
1392 if (port
->reg_src_sel
)
1393 cx_write(port
->reg_src_sel
, port
->src_sel_val
);
1395 cx_write(port
->reg_hw_sop_ctrl
, port
->hw_sop_ctrl_val
);
1396 cx_write(port
->reg_ts_clk_en
, port
->ts_clk_en_val
);
1397 cx_write(port
->reg_vld_misc
, port
->vld_misc_val
);
1398 cx_write(port
->reg_gen_ctrl
, port
->gen_ctrl_val
);
1401 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1402 /* reset counter to zero */
1403 cx_write(port
->reg_gpcnt_ctl
, 3);
1406 /* Set VIDB pins to input */
1407 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
) {
1408 reg
= cx_read(PAD_CTRL
);
1409 reg
&= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1410 cx_write(PAD_CTRL
, reg
);
1413 /* Set VIDC pins to input */
1414 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
) {
1415 reg
= cx_read(PAD_CTRL
);
1416 reg
&= ~0x4; /* Clear TS2_SOP_OE */
1417 cx_write(PAD_CTRL
, reg
);
1420 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
) {
1422 reg
= cx_read(PAD_CTRL
);
1423 reg
= reg
& ~0x1; /* Clear TS1_OE */
1425 /* FIXME, bit 2 writing here is questionable */
1426 /* set TS1_SOP_OE and TS1_OE_HI */
1428 cx_write(PAD_CTRL
, reg
);
1430 /* FIXME and these two registers should be documented. */
1431 cx_write(CLK_DELAY
, cx_read(CLK_DELAY
) | 0x80000011);
1432 cx_write(ALT_PIN_OUT_SEL
, 0x10100045);
1435 switch (dev
->bridge
) {
1436 case CX23885_BRIDGE_885
:
1437 case CX23885_BRIDGE_887
:
1438 case CX23885_BRIDGE_888
:
1440 dprintk(1, "%s() enabling TS int's and DMA\n", __func__
);
1441 cx_set(port
->reg_ts_int_msk
, port
->ts_int_msk_val
);
1442 cx_set(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1443 cx23885_irq_add(dev
, port
->pci_irqmask
);
1444 cx23885_irq_enable_all(dev
);
1450 cx_set(DEV_CNTRL2
, (1<<5)); /* Enable RISC controller */
1452 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1453 cx23885_av_clk(dev
, 1);
1456 cx23885_tsport_reg_dump(port
);
1461 static int cx23885_stop_dma(struct cx23885_tsport
*port
)
1463 struct cx23885_dev
*dev
= port
->dev
;
1466 dprintk(1, "%s()\n", __func__
);
1468 /* Stop interrupts and DMA */
1469 cx_clear(port
->reg_ts_int_msk
, port
->ts_int_msk_val
);
1470 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1472 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
) {
1474 reg
= cx_read(PAD_CTRL
);
1479 /* clear TS1_SOP_OE and TS1_OE_HI */
1481 cx_write(PAD_CTRL
, reg
);
1482 cx_write(port
->reg_src_sel
, 0);
1483 cx_write(port
->reg_gen_ctrl
, 8);
1487 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1488 cx23885_av_clk(dev
, 0);
1493 int cx23885_restart_queue(struct cx23885_tsport
*port
,
1494 struct cx23885_dmaqueue
*q
)
1496 struct cx23885_dev
*dev
= port
->dev
;
1497 struct cx23885_buffer
*buf
;
1499 dprintk(5, "%s()\n", __func__
);
1500 if (list_empty(&q
->active
)) {
1501 struct cx23885_buffer
*prev
;
1504 dprintk(5, "%s() queue is empty\n", __func__
);
1507 if (list_empty(&q
->queued
))
1509 buf
= list_entry(q
->queued
.next
, struct cx23885_buffer
,
1512 list_del(&buf
->vb
.queue
);
1513 list_add_tail(&buf
->vb
.queue
, &q
->active
);
1514 cx23885_start_dma(port
, q
, buf
);
1515 buf
->vb
.state
= VIDEOBUF_ACTIVE
;
1516 buf
->count
= q
->count
++;
1517 mod_timer(&q
->timeout
, jiffies
+BUFFER_TIMEOUT
);
1518 dprintk(5, "[%p/%d] restart_queue - f/active\n",
1521 } else if (prev
->vb
.width
== buf
->vb
.width
&&
1522 prev
->vb
.height
== buf
->vb
.height
&&
1523 prev
->fmt
== buf
->fmt
) {
1524 list_del(&buf
->vb
.queue
);
1525 list_add_tail(&buf
->vb
.queue
, &q
->active
);
1526 buf
->vb
.state
= VIDEOBUF_ACTIVE
;
1527 buf
->count
= q
->count
++;
1528 prev
->risc
.jmp
[1] = cpu_to_le32(buf
->risc
.dma
);
1529 /* 64 bit bits 63-32 */
1530 prev
->risc
.jmp
[2] = cpu_to_le32(0);
1531 dprintk(5, "[%p/%d] restart_queue - m/active\n",
1541 buf
= list_entry(q
->active
.next
, struct cx23885_buffer
, vb
.queue
);
1542 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1544 cx23885_start_dma(port
, q
, buf
);
1545 list_for_each_entry(buf
, &q
->active
, vb
.queue
)
1546 buf
->count
= q
->count
++;
1547 mod_timer(&q
->timeout
, jiffies
+ BUFFER_TIMEOUT
);
1551 /* ------------------------------------------------------------------ */
1553 int cx23885_buf_prepare(struct videobuf_queue
*q
, struct cx23885_tsport
*port
,
1554 struct cx23885_buffer
*buf
, enum v4l2_field field
)
1556 struct cx23885_dev
*dev
= port
->dev
;
1557 int size
= port
->ts_packet_size
* port
->ts_packet_count
;
1560 dprintk(1, "%s: %p\n", __func__
, buf
);
1561 if (0 != buf
->vb
.baddr
&& buf
->vb
.bsize
< size
)
1564 if (VIDEOBUF_NEEDS_INIT
== buf
->vb
.state
) {
1565 buf
->vb
.width
= port
->ts_packet_size
;
1566 buf
->vb
.height
= port
->ts_packet_count
;
1567 buf
->vb
.size
= size
;
1568 buf
->vb
.field
= field
/*V4L2_FIELD_TOP*/;
1570 rc
= videobuf_iolock(q
, &buf
->vb
, NULL
);
1573 cx23885_risc_databuffer(dev
->pci
, &buf
->risc
,
1574 videobuf_to_dma(&buf
->vb
)->sglist
,
1575 buf
->vb
.width
, buf
->vb
.height
, 0);
1577 buf
->vb
.state
= VIDEOBUF_PREPARED
;
1581 cx23885_free_buffer(q
, buf
);
1585 void cx23885_buf_queue(struct cx23885_tsport
*port
, struct cx23885_buffer
*buf
)
1587 struct cx23885_buffer
*prev
;
1588 struct cx23885_dev
*dev
= port
->dev
;
1589 struct cx23885_dmaqueue
*cx88q
= &port
->mpegq
;
1591 /* add jump to stopper */
1592 buf
->risc
.jmp
[0] = cpu_to_le32(RISC_JUMP
| RISC_IRQ1
| RISC_CNT_INC
);
1593 buf
->risc
.jmp
[1] = cpu_to_le32(cx88q
->stopper
.dma
);
1594 buf
->risc
.jmp
[2] = cpu_to_le32(0); /* bits 63-32 */
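	/* The jmp above initially points at the queue's stopper program; if
	 * another buffer is already active, the previous buffer's jmp[1] is
	 * re-pointed at this buffer's RISC program further down, which is how
	 * the active list is stitched into one continuous DMA chain. */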
1596 if (list_empty(&cx88q
->active
)) {
1597 dprintk(1, "queue is empty - first active\n");
1598 list_add_tail(&buf
->vb
.queue
, &cx88q
->active
);
1599 cx23885_start_dma(port
, cx88q
, buf
);
1600 buf
->vb
.state
= VIDEOBUF_ACTIVE
;
1601 buf
->count
= cx88q
->count
++;
1602 mod_timer(&cx88q
->timeout
, jiffies
+ BUFFER_TIMEOUT
);
1603 dprintk(1, "[%p/%d] %s - first active\n",
1604 buf
, buf
->vb
.i
, __func__
);
1606 dprintk(1, "queue is not empty - append to active\n");
1607 prev
= list_entry(cx88q
->active
.prev
, struct cx23885_buffer
,
1609 list_add_tail(&buf
->vb
.queue
, &cx88q
->active
);
1610 buf
->vb
.state
= VIDEOBUF_ACTIVE
;
1611 buf
->count
= cx88q
->count
++;
1612 prev
->risc
.jmp
[1] = cpu_to_le32(buf
->risc
.dma
);
1613 prev
->risc
.jmp
[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1614 dprintk(1, "[%p/%d] %s - append to active\n",
1615 buf
, buf
->vb
.i
, __func__
);
1619 /* ----------------------------------------------------------- */
1621 static void do_cancel_buffers(struct cx23885_tsport
*port
, char *reason
,
1624 struct cx23885_dev
*dev
= port
->dev
;
1625 struct cx23885_dmaqueue
*q
= &port
->mpegq
;
1626 struct cx23885_buffer
*buf
;
1627 unsigned long flags
;
1629 spin_lock_irqsave(&port
->slock
, flags
);
1630 while (!list_empty(&q
->active
)) {
1631 buf
= list_entry(q
->active
.next
, struct cx23885_buffer
,
1633 list_del(&buf
->vb
.queue
);
1634 buf
->vb
.state
= VIDEOBUF_ERROR
;
1635 wake_up(&buf
->vb
.done
);
1636 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1637 buf
, buf
->vb
.i
, reason
, (unsigned long)buf
->risc
.dma
);
1640 dprintk(1, "restarting queue\n");
1641 cx23885_restart_queue(port
, q
);
1643 spin_unlock_irqrestore(&port
->slock
, flags
);
1646 void cx23885_cancel_buffers(struct cx23885_tsport
*port
)
1648 struct cx23885_dev
*dev
= port
->dev
;
1649 struct cx23885_dmaqueue
*q
= &port
->mpegq
;
1651 dprintk(1, "%s()\n", __func__
);
1652 del_timer_sync(&q
->timeout
);
1653 cx23885_stop_dma(port
);
1654 do_cancel_buffers(port
, "cancel", 0);
1657 static void cx23885_timeout(unsigned long data
)
1659 struct cx23885_tsport
*port
= (struct cx23885_tsport
*)data
;
1660 struct cx23885_dev
*dev
= port
->dev
;
1662 dprintk(1, "%s()\n", __func__
);
1665 cx23885_sram_channel_dump(dev
,
1666 &dev
->sram_channels
[port
->sram_chno
]);
1668 cx23885_stop_dma(port
);
1669 do_cancel_buffers(port
, "timeout", 1);
1672 int cx23885_irq_417(struct cx23885_dev
*dev
, u32 status
)
1674 /* FIXME: port1 assumption here. */
1675 struct cx23885_tsport
*port
= &dev
->ts1
;
1682 count
= cx_read(port
->reg_gpcnt
);
1683 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1684 status
, cx_read(port
->reg_ts_int_msk
), count
);
1686 if ((status
& VID_B_MSK_BAD_PKT
) ||
1687 (status
& VID_B_MSK_OPC_ERR
) ||
1688 (status
& VID_B_MSK_VBI_OPC_ERR
) ||
1689 (status
& VID_B_MSK_SYNC
) ||
1690 (status
& VID_B_MSK_VBI_SYNC
) ||
1691 (status
& VID_B_MSK_OF
) ||
1692 (status
& VID_B_MSK_VBI_OF
)) {
1693 printk(KERN_ERR
"%s: V4L mpeg risc op code error, status "
1694 "= 0x%x\n", dev
->name
, status
);
1695 if (status
& VID_B_MSK_BAD_PKT
)
1696 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1697 if (status
& VID_B_MSK_OPC_ERR
)
1698 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1699 if (status
& VID_B_MSK_VBI_OPC_ERR
)
1700 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1701 if (status
& VID_B_MSK_SYNC
)
1702 dprintk(1, " VID_B_MSK_SYNC\n");
1703 if (status
& VID_B_MSK_VBI_SYNC
)
1704 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1705 if (status
& VID_B_MSK_OF
)
1706 dprintk(1, " VID_B_MSK_OF\n");
1707 if (status
& VID_B_MSK_VBI_OF
)
1708 dprintk(1, " VID_B_MSK_VBI_OF\n");
1710 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1711 cx23885_sram_channel_dump(dev
,
1712 &dev
->sram_channels
[port
->sram_chno
]);
1713 cx23885_417_check_encoder(dev
);
1714 } else if (status
& VID_B_MSK_RISCI1
) {
1715 dprintk(7, " VID_B_MSK_RISCI1\n");
1716 spin_lock(&port
->slock
);
1717 cx23885_wakeup(port
, &port
->mpegq
, count
);
1718 spin_unlock(&port
->slock
);
1719 } else if (status
& VID_B_MSK_RISCI2
) {
1720 dprintk(7, " VID_B_MSK_RISCI2\n");
1721 spin_lock(&port
->slock
);
1722 cx23885_restart_queue(port
, &port
->mpegq
);
1723 spin_unlock(&port
->slock
);
1726 cx_write(port
->reg_ts_int_stat
, status
);
1733 static int cx23885_irq_ts(struct cx23885_tsport
*port
, u32 status
)
1735 struct cx23885_dev
*dev
= port
->dev
;
1739 if ((status
& VID_BC_MSK_OPC_ERR
) ||
1740 (status
& VID_BC_MSK_BAD_PKT
) ||
1741 (status
& VID_BC_MSK_SYNC
) ||
1742 (status
& VID_BC_MSK_OF
)) {
1744 if (status
& VID_BC_MSK_OPC_ERR
)
1745 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1746 VID_BC_MSK_OPC_ERR
);
1748 if (status
& VID_BC_MSK_BAD_PKT
)
1749 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1750 VID_BC_MSK_BAD_PKT
);
1752 if (status
& VID_BC_MSK_SYNC
)
1753 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1756 if (status
& VID_BC_MSK_OF
)
1757 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1760 printk(KERN_ERR
"%s: mpeg risc op code error\n", dev
->name
);
1762 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1763 cx23885_sram_channel_dump(dev
,
1764 &dev
->sram_channels
[port
->sram_chno
]);
1766 } else if (status
& VID_BC_MSK_RISCI1
) {
1768 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1
);
1770 spin_lock(&port
->slock
);
1771 count
= cx_read(port
->reg_gpcnt
);
1772 cx23885_wakeup(port
, &port
->mpegq
, count
);
1773 spin_unlock(&port
->slock
);
1775 } else if (status
& VID_BC_MSK_RISCI2
) {
1777 dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2
);
1779 spin_lock(&port
->slock
);
1780 cx23885_restart_queue(port
, &port
->mpegq
);
1781 spin_unlock(&port
->slock
);
1785 cx_write(port
->reg_ts_int_stat
, status
);
1792 static irqreturn_t
cx23885_irq(int irq
, void *dev_id
)
1794 struct cx23885_dev
*dev
= dev_id
;
1795 struct cx23885_tsport
*ts1
= &dev
->ts1
;
1796 struct cx23885_tsport
*ts2
= &dev
->ts2
;
1797 u32 pci_status
, pci_mask
;
1798 u32 vida_status
, vida_mask
;
1799 u32 audint_status
, audint_mask
;
1800 u32 ts1_status
, ts1_mask
;
1801 u32 ts2_status
, ts2_mask
;
1802 int vida_count
= 0, ts1_count
= 0, ts2_count
= 0, handled
= 0;
1803 int audint_count
= 0;
1804 bool subdev_handled
;
1806 pci_status
= cx_read(PCI_INT_STAT
);
1807 pci_mask
= cx23885_irq_get_mask(dev
);
1808 vida_status
= cx_read(VID_A_INT_STAT
);
1809 vida_mask
= cx_read(VID_A_INT_MSK
);
1810 audint_status
= cx_read(AUDIO_INT_INT_STAT
);
1811 audint_mask
= cx_read(AUDIO_INT_INT_MSK
);
1812 ts1_status
= cx_read(VID_B_INT_STAT
);
1813 ts1_mask
= cx_read(VID_B_INT_MSK
);
1814 ts2_status
= cx_read(VID_C_INT_STAT
);
1815 ts2_mask
= cx_read(VID_C_INT_MSK
);
1817 if ((pci_status
== 0) && (ts2_status
== 0) && (ts1_status
== 0))
1820 vida_count
= cx_read(VID_A_GPCNT
);
1821 audint_count
= cx_read(AUD_INT_A_GPCNT
);
1822 ts1_count
= cx_read(ts1
->reg_gpcnt
);
1823 ts2_count
= cx_read(ts2
->reg_gpcnt
);
1824 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1825 pci_status
, pci_mask
);
1826 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1827 vida_status
, vida_mask
, vida_count
);
1828 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1829 audint_status
, audint_mask
, audint_count
);
1830 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1831 ts1_status
, ts1_mask
, ts1_count
);
1832 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1833 ts2_status
, ts2_mask
, ts2_count
);
1835 if (pci_status
& (PCI_MSK_RISC_RD
| PCI_MSK_RISC_WR
|
1836 PCI_MSK_AL_RD
| PCI_MSK_AL_WR
| PCI_MSK_APB_DMA
|
1837 PCI_MSK_VID_C
| PCI_MSK_VID_B
| PCI_MSK_VID_A
|
1838 PCI_MSK_AUD_INT
| PCI_MSK_AUD_EXT
|
1839 PCI_MSK_GPIO0
| PCI_MSK_GPIO1
|
1840 PCI_MSK_AV_CORE
| PCI_MSK_IR
)) {
1842 if (pci_status
& PCI_MSK_RISC_RD
)
1843 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1846 if (pci_status
& PCI_MSK_RISC_WR
)
1847 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1850 if (pci_status
& PCI_MSK_AL_RD
)
1851 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1854 if (pci_status
& PCI_MSK_AL_WR
)
1855 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1858 if (pci_status
& PCI_MSK_APB_DMA
)
1859 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1862 if (pci_status
& PCI_MSK_VID_C
)
1863 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1866 if (pci_status
& PCI_MSK_VID_B
)
1867 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1870 if (pci_status
& PCI_MSK_VID_A
)
1871 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1874 if (pci_status
& PCI_MSK_AUD_INT
)
1875 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1878 if (pci_status
& PCI_MSK_AUD_EXT
)
1879 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1882 if (pci_status
& PCI_MSK_GPIO0
)
1883 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1886 if (pci_status
& PCI_MSK_GPIO1
)
1887 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1890 if (pci_status
& PCI_MSK_AV_CORE
)
1891 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1894 if (pci_status
& PCI_MSK_IR
)
1895 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1899 if (cx23885_boards
[dev
->board
].ci_type
== 1 &&
1900 (pci_status
& (PCI_MSK_GPIO1
| PCI_MSK_GPIO0
)))
1901 handled
+= netup_ci_slot_status(dev
, pci_status
);
1903 if (cx23885_boards
[dev
->board
].ci_type
== 2 &&
1904 (pci_status
& PCI_MSK_GPIO0
))
1905 handled
+= altera_ci_irq(dev
);
1908 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
)
1909 handled
+= cx23885_irq_ts(ts1
, ts1_status
);
1911 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1912 handled
+= cx23885_irq_417(dev
, ts1_status
);
1916 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
)
1917 handled
+= cx23885_irq_ts(ts2
, ts2_status
);
1919 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
)
1920 handled
+= cx23885_irq_417(dev
, ts2_status
);
1924 handled
+= cx23885_video_irq(dev
, vida_status
);
1927 handled
+= cx23885_audio_irq(dev
, audint_status
, audint_mask
);
1929 if (pci_status
& PCI_MSK_IR
) {
1930 subdev_handled
= false;
1931 v4l2_subdev_call(dev
->sd_ir
, core
, interrupt_service_routine
,
1932 pci_status
, &subdev_handled
);
1937 if ((pci_status
& pci_mask
) & PCI_MSK_AV_CORE
) {
1938 cx23885_irq_disable(dev
, PCI_MSK_AV_CORE
);
1939 if (!schedule_work(&dev
->cx25840_work
))
1940 printk(KERN_ERR
"%s: failed to set up deferred work for"
1941 " AV Core/IR interrupt. Interrupt is disabled"
1942 " and won't be re-enabled\n", dev
->name
);
1947 cx_write(PCI_INT_STAT
, pci_status
);
1949 return IRQ_RETVAL(handled
);
1952 static void cx23885_v4l2_dev_notify(struct v4l2_subdev
*sd
,
1953 unsigned int notification
, void *arg
)
1955 struct cx23885_dev
*dev
;
1960 dev
= to_cx23885(sd
->v4l2_dev
);
1962 switch (notification
) {
1963 case V4L2_SUBDEV_IR_RX_NOTIFY
: /* Possibly called in an IRQ context */
1964 if (sd
== dev
->sd_ir
)
1965 cx23885_ir_rx_v4l2_dev_notify(sd
, *(u32
*)arg
);
1967 case V4L2_SUBDEV_IR_TX_NOTIFY
: /* Possibly called in an IRQ context */
1968 if (sd
== dev
->sd_ir
)
1969 cx23885_ir_tx_v4l2_dev_notify(sd
, *(u32
*)arg
);
1974 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev
*dev
)
1976 INIT_WORK(&dev
->cx25840_work
, cx23885_av_work_handler
);
1977 INIT_WORK(&dev
->ir_rx_work
, cx23885_ir_rx_work_handler
);
1978 INIT_WORK(&dev
->ir_tx_work
, cx23885_ir_tx_work_handler
);
1979 dev
->v4l2_dev
.notify
= cx23885_v4l2_dev_notify
;
1982 static inline int encoder_on_portb(struct cx23885_dev
*dev
)
1984 return cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
;
1987 static inline int encoder_on_portc(struct cx23885_dev
*dev
)
1989 return cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
;
/* Mask represents 32 different GPIOs; GPIOs are split into multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder (with its own GPIOs) is present.  Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location.  Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
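/*
 * Example (illustrative): cx23885_gpio_set(dev, 0x00000005) drives bridge
 * GPIOs 0 and 2 through GP0_IO, while a mask of 0x00000010 (GPIO 4) lands
 * on the cx23417 host bus as MC417_RWD bit (0x10 >> 3) = bit 1.
 */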
2004 void cx23885_gpio_set(struct cx23885_dev
*dev
, u32 mask
)
2007 cx_set(GP0_IO
, mask
& 0x7);
2009 if (mask
& 0x0007fff8) {
2010 if (encoder_on_portb(dev
) || encoder_on_portc(dev
))
2012 "%s: Setting GPIO on encoder ports\n",
2014 cx_set(MC417_RWD
, (mask
& 0x0007fff8) >> 3);
2018 if (mask
& 0x00f80000)
2019 printk(KERN_INFO
"%s: Unsupported\n", dev
->name
);
2022 void cx23885_gpio_clear(struct cx23885_dev
*dev
, u32 mask
)
2024 if (mask
& 0x00000007)
2025 cx_clear(GP0_IO
, mask
& 0x7);
2027 if (mask
& 0x0007fff8) {
2028 if (encoder_on_portb(dev
) || encoder_on_portc(dev
))
2030 "%s: Clearing GPIO moving on encoder ports\n",
2032 cx_clear(MC417_RWD
, (mask
& 0x7fff8) >> 3);
2036 if (mask
& 0x00f80000)
2037 printk(KERN_INFO
"%s: Unsupported\n", dev
->name
);
2040 u32
cx23885_gpio_get(struct cx23885_dev
*dev
, u32 mask
)
2042 if (mask
& 0x00000007)
2043 return (cx_read(GP0_IO
) >> 8) & mask
& 0x7;
2045 if (mask
& 0x0007fff8) {
2046 if (encoder_on_portb(dev
) || encoder_on_portc(dev
))
2048 "%s: Reading GPIO moving on encoder ports\n",
2050 return (cx_read(MC417_RWD
) & ((mask
& 0x7fff8) >> 3)) << 3;
2054 if (mask
& 0x00f80000)
2055 printk(KERN_INFO
"%s: Unsupported\n", dev
->name
);
2060 void cx23885_gpio_enable(struct cx23885_dev
*dev
, u32 mask
, int asoutput
)
2062 if ((mask
& 0x00000007) && asoutput
)
2063 cx_set(GP0_IO
, (mask
& 0x7) << 16);
2064 else if ((mask
& 0x00000007) && !asoutput
)
2065 cx_clear(GP0_IO
, (mask
& 0x7) << 16);
2067 if (mask
& 0x0007fff8) {
2068 if (encoder_on_portb(dev
) || encoder_on_portc(dev
))
2070 "%s: Enabling GPIO on encoder ports\n",
2074 /* MC417_OEN is active low for output, write 1 for an input */
2075 if ((mask
& 0x0007fff8) && asoutput
)
2076 cx_clear(MC417_OEN
, (mask
& 0x7fff8) >> 3);
2078 else if ((mask
& 0x0007fff8) && !asoutput
)
2079 cx_set(MC417_OEN
, (mask
& 0x7fff8) >> 3);
2084 static int __devinit
cx23885_initdev(struct pci_dev
*pci_dev
,
2085 const struct pci_device_id
*pci_id
)
2087 struct cx23885_dev
*dev
;
2090 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
2094 err
= v4l2_device_register(&pci_dev
->dev
, &dev
->v4l2_dev
);
2098 /* Prepare to handle notifications from subdevices */
2099 cx23885_v4l2_dev_notify_init(dev
);
2103 if (pci_enable_device(pci_dev
)) {
2108 if (cx23885_dev_setup(dev
) < 0) {
2113 /* print pci info */
2114 dev
->pci_rev
= pci_dev
->revision
;
2115 pci_read_config_byte(pci_dev
, PCI_LATENCY_TIMER
, &dev
->pci_lat
);
2116 printk(KERN_INFO
"%s/0: found at %s, rev: %d, irq: %d, "
2117 "latency: %d, mmio: 0x%llx\n", dev
->name
,
2118 pci_name(pci_dev
), dev
->pci_rev
, pci_dev
->irq
,
2120 (unsigned long long)pci_resource_start(pci_dev
, 0));
2122 pci_set_master(pci_dev
);
2123 if (!pci_dma_supported(pci_dev
, 0xffffffff)) {
2124 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev
->name
);
2129 err
= request_irq(pci_dev
->irq
, cx23885_irq
,
2130 IRQF_SHARED
| IRQF_DISABLED
, dev
->name
, dev
);
2132 printk(KERN_ERR
"%s: can't get IRQ %d\n",
2133 dev
->name
, pci_dev
->irq
);
2137 switch (dev
->board
) {
2138 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI
:
2139 cx23885_irq_add_enable(dev
, PCI_MSK_GPIO1
| PCI_MSK_GPIO0
);
2141 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF
:
2142 cx23885_irq_add_enable(dev
, PCI_MSK_GPIO0
);
2147 * The CX2388[58] IR controller can start firing interrupts when
2148 * enabled, so these have to take place after the cx23885_irq() handler
2149 * is hooked up by the call to request_irq() above.
2151 cx23885_ir_pci_int_enable(dev
);
2152 cx23885_input_init(dev
);
2157 cx23885_dev_unregister(dev
);
2159 v4l2_device_unregister(&dev
->v4l2_dev
);
2165 static void __devexit
cx23885_finidev(struct pci_dev
*pci_dev
)
2167 struct v4l2_device
*v4l2_dev
= pci_get_drvdata(pci_dev
);
2168 struct cx23885_dev
*dev
= to_cx23885(v4l2_dev
);
2170 cx23885_input_fini(dev
);
2171 cx23885_ir_fini(dev
);
2173 cx23885_shutdown(dev
);
2175 pci_disable_device(pci_dev
);
2177 /* unregister stuff */
2178 free_irq(pci_dev
->irq
, dev
);
2180 cx23885_dev_unregister(dev
);
2181 v4l2_device_unregister(v4l2_dev
);
2185 static struct pci_device_id cx23885_pci_tbl
[] = {
2190 .subvendor
= PCI_ANY_ID
,
2191 .subdevice
= PCI_ANY_ID
,
2196 .subvendor
= PCI_ANY_ID
,
2197 .subdevice
= PCI_ANY_ID
,
2199 /* --- end of list --- */
2202 MODULE_DEVICE_TABLE(pci
, cx23885_pci_tbl
);
2204 static struct pci_driver cx23885_pci_driver
= {
2206 .id_table
= cx23885_pci_tbl
,
2207 .probe
= cx23885_initdev
,
2208 .remove
= __devexit_p(cx23885_finidev
),
2214 static int __init
cx23885_init(void)
2216 printk(KERN_INFO
"cx23885 driver version %s loaded\n",
2218 return pci_register_driver(&cx23885_pci_driver
);
2221 static void __exit
cx23885_fini(void)
2223 pci_unregister_driver(&cx23885_pci_driver
);
2226 module_init(cx23885_init
);
2227 module_exit(cx23885_fini
);