2 * Driver for the Conexant CX23885 PCIe bridge
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
35 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
36 MODULE_AUTHOR("Steven Toth <stoth@hauppauge.com>");
37 MODULE_LICENSE("GPL");
39 static unsigned int debug
= 0;
40 module_param(debug
,int,0644);
41 MODULE_PARM_DESC(debug
,"enable debug messages");
43 static unsigned int card
[] = {[0 ... (CX23885_MAXBOARDS
- 1)] = UNSET
};
44 module_param_array(card
, int, NULL
, 0444);
45 MODULE_PARM_DESC(card
,"card type");
/* Debug logging helper: emits a KERN_DEBUG message prefixed with the device
 * name when the module-level 'debug' parameter is >= 'level'.  Requires a
 * local 'dev' pointer in the calling scope.  Wrapped in do { } while (0) so
 * the macro expands to a single statement; the original bare 'if' form
 * mis-binds an 'else' in "if (x) dprintk(...); else ..." (dangling else). */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
	} while (0)
/* Number of cx23885 devices probed so far; used to index the card[] option. */
static unsigned int cx23885_devcount;

/* Protects cx23885_devlist, the list of all active cx23885 devices. */
static DEFINE_MUTEX(devlist);
static LIST_HEAD(cx23885_devlist);
55 #define NO_SYNC_LINE (-1U)
59 * 1 line = 16 bytes of CDT
61 * cdt size = 16 * linesize
66 * 0x00000000 0x00008fff FIFO clusters
67 * 0x00010000 0x000104af Channel Management Data Structures
68 * 0x000104b0 0x000104ff Free
69 * 0x00010500 0x000108bf 15 channels * iqsize
70 * 0x000108c0 0x000108ff Free
71 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
72 * 15 channels * (iqsize + (maxlines * linesize))
73 * 0x00010ea0 0x00010xxx Free
76 struct sram_channel cx23885_sram_channels
[] = {
79 .cmds_start
= 0x10000,
80 .ctrl_start
= 0x10500,
84 .ptr1_reg
= DMA1_PTR1
,
85 .ptr2_reg
= DMA1_PTR2
,
86 .cnt1_reg
= DMA1_CNT1
,
87 .cnt2_reg
= DMA1_CNT2
,
97 .ptr1_reg
= DMA2_PTR1
,
98 .ptr2_reg
= DMA2_PTR2
,
99 .cnt1_reg
= DMA2_CNT1
,
100 .cnt2_reg
= DMA2_CNT2
,
104 .cmds_start
= 0x100A0,
105 .ctrl_start
= 0x10780,
107 .fifo_start
= 0x5000,
109 .ptr1_reg
= DMA3_PTR1
,
110 .ptr2_reg
= DMA3_PTR2
,
111 .cnt1_reg
= DMA3_CNT1
,
112 .cnt2_reg
= DMA3_CNT2
,
121 .ptr1_reg
= DMA4_PTR1
,
122 .ptr2_reg
= DMA4_PTR2
,
123 .cnt1_reg
= DMA4_CNT1
,
124 .cnt2_reg
= DMA4_CNT2
,
133 .ptr1_reg
= DMA5_PTR1
,
134 .ptr2_reg
= DMA5_PTR2
,
135 .cnt1_reg
= DMA5_CNT1
,
136 .cnt2_reg
= DMA5_CNT2
,
140 .cmds_start
= 0x10140,
141 .ctrl_start
= 0x10680,
143 .fifo_start
= 0x6000,
145 .ptr1_reg
= DMA5_PTR1
,
146 .ptr2_reg
= DMA5_PTR2
,
147 .cnt1_reg
= DMA5_CNT1
,
148 .cnt2_reg
= DMA5_CNT2
,
157 .ptr1_reg
= DMA6_PTR1
,
158 .ptr2_reg
= DMA6_PTR2
,
159 .cnt1_reg
= DMA6_CNT1
,
160 .cnt2_reg
= DMA6_CNT2
,
169 .ptr1_reg
= DMA7_PTR1
,
170 .ptr2_reg
= DMA7_PTR2
,
171 .cnt1_reg
= DMA7_CNT1
,
172 .cnt2_reg
= DMA7_CNT2
,
181 .ptr1_reg
= DMA8_PTR1
,
182 .ptr2_reg
= DMA8_PTR2
,
183 .cnt1_reg
= DMA8_CNT1
,
184 .cnt2_reg
= DMA8_CNT2
,
188 /* FIXME, these allocations will change when
189  * analog arrives. To be reviewed.
190 * CX23887 Assumptions
191 * 1 line = 16 bytes of CDT
193 * cdt size = 16 * linesize
198 * 0x00000000 0x00008fff FIFO clusters
199 * 0x00010000 0x000104af Channel Management Data Structures
200 * 0x000104b0 0x000104ff Free
201 * 0x00010500 0x000108bf 15 channels * iqsize
202 * 0x000108c0 0x000108ff Free
203 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
204 * 15 channels * (iqsize + (maxlines * linesize))
205 * 0x00010ea0 0x00010xxx Free
208 struct sram_channel cx23887_sram_channels
[] = {
216 .ptr1_reg
= DMA1_PTR1
,
217 .ptr2_reg
= DMA1_PTR2
,
218 .cnt1_reg
= DMA1_CNT1
,
219 .cnt2_reg
= DMA1_CNT2
,
228 .ptr1_reg
= DMA2_PTR1
,
229 .ptr2_reg
= DMA2_PTR2
,
230 .cnt1_reg
= DMA2_CNT1
,
231 .cnt2_reg
= DMA2_CNT2
,
240 .ptr1_reg
= DMA3_PTR1
,
241 .ptr2_reg
= DMA3_PTR2
,
242 .cnt1_reg
= DMA3_CNT1
,
243 .cnt2_reg
= DMA3_CNT2
,
252 .ptr1_reg
= DMA4_PTR1
,
253 .ptr2_reg
= DMA4_PTR2
,
254 .cnt1_reg
= DMA4_CNT1
,
255 .cnt2_reg
= DMA4_CNT2
,
264 .ptr1_reg
= DMA5_PTR1
,
265 .ptr2_reg
= DMA5_PTR2
,
266 .cnt1_reg
= DMA5_CNT1
,
267 .cnt2_reg
= DMA5_CNT2
,
271 .cmds_start
= 0x10140,
272 .ctrl_start
= 0x10680,
274 .fifo_start
= 0x6000,
276 .ptr1_reg
= DMA5_PTR1
,
277 .ptr2_reg
= DMA5_PTR2
,
278 .cnt1_reg
= DMA5_CNT1
,
279 .cnt2_reg
= DMA5_CNT2
,
288 .ptr1_reg
= DMA6_PTR1
,
289 .ptr2_reg
= DMA6_PTR2
,
290 .cnt1_reg
= DMA6_CNT1
,
291 .cnt2_reg
= DMA6_CNT2
,
300 .ptr1_reg
= DMA7_PTR1
,
301 .ptr2_reg
= DMA7_PTR2
,
302 .cnt1_reg
= DMA7_CNT1
,
303 .cnt2_reg
= DMA7_CNT2
,
312 .ptr1_reg
= DMA8_PTR1
,
313 .ptr2_reg
= DMA8_PTR2
,
314 .cnt1_reg
= DMA8_CNT1
,
315 .cnt2_reg
= DMA8_CNT2
,
319 static int cx23885_risc_decode(u32 risc
)
321 static char *instr
[16] = {
322 [ RISC_SYNC
>> 28 ] = "sync",
323 [ RISC_WRITE
>> 28 ] = "write",
324 [ RISC_WRITEC
>> 28 ] = "writec",
325 [ RISC_READ
>> 28 ] = "read",
326 [ RISC_READC
>> 28 ] = "readc",
327 [ RISC_JUMP
>> 28 ] = "jump",
328 [ RISC_SKIP
>> 28 ] = "skip",
329 [ RISC_WRITERM
>> 28 ] = "writerm",
330 [ RISC_WRITECM
>> 28 ] = "writecm",
331 [ RISC_WRITECR
>> 28 ] = "writecr",
333 static int incr
[16] = {
334 [ RISC_WRITE
>> 28 ] = 3,
335 [ RISC_JUMP
>> 28 ] = 3,
336 [ RISC_SKIP
>> 28 ] = 1,
337 [ RISC_SYNC
>> 28 ] = 1,
338 [ RISC_WRITERM
>> 28 ] = 3,
339 [ RISC_WRITECM
>> 28 ] = 3,
340 [ RISC_WRITECR
>> 28 ] = 4,
342 static char *bits
[] = {
343 "12", "13", "14", "resync",
344 "cnt0", "cnt1", "18", "19",
345 "20", "21", "22", "23",
346 "irq1", "irq2", "eol", "sol",
350 printk("0x%08x [ %s", risc
,
351 instr
[risc
>> 28] ? instr
[risc
>> 28] : "INVALID");
352 for (i
= ARRAY_SIZE(bits
) - 1; i
>= 0; i
--)
353 if (risc
& (1 << (i
+ 12)))
354 printk(" %s", bits
[i
]);
355 printk(" count=%d ]\n", risc
& 0xfff);
356 return incr
[risc
>> 28] ? incr
[risc
>> 28] : 1;
359 void cx23885_wakeup(struct cx23885_tsport
*port
,
360 struct cx23885_dmaqueue
*q
, u32 count
)
362 struct cx23885_dev
*dev
= port
->dev
;
363 struct cx23885_buffer
*buf
;
366 for (bc
= 0;; bc
++) {
367 if (list_empty(&q
->active
))
369 buf
= list_entry(q
->active
.next
,
370 struct cx23885_buffer
, vb
.queue
);
372 /* count comes from the hw and is is 16bit wide --
373 * this trick handles wrap-arounds correctly for
374 * up to 32767 buffers in flight... */
375 if ((s16
) (count
- buf
->count
) < 0)
378 do_gettimeofday(&buf
->vb
.ts
);
379 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf
, buf
->vb
.i
,
381 buf
->vb
.state
= STATE_DONE
;
382 list_del(&buf
->vb
.queue
);
383 wake_up(&buf
->vb
.done
);
385 if (list_empty(&q
->active
)) {
386 del_timer(&q
->timeout
);
388 mod_timer(&q
->timeout
, jiffies
+ BUFFER_TIMEOUT
);
391 printk("%s: %d buffers handled (should be 1)\n",
394 void cx23885_sram_channel_dump(struct cx23885_dev
*dev
,
395 struct sram_channel
*ch
);
397 int cx23885_sram_channel_setup(struct cx23885_dev
*dev
,
398 struct sram_channel
*ch
,
399 unsigned int bpl
, u32 risc
)
401 unsigned int i
, lines
;
404 if (ch
->cmds_start
== 0)
406 dprintk(1, "%s() Erasing channel [%s]\n", __FUNCTION__
,
408 cx_write(ch
->ptr1_reg
, 0);
409 cx_write(ch
->ptr2_reg
, 0);
410 cx_write(ch
->cnt2_reg
, 0);
411 cx_write(ch
->cnt1_reg
, 0);
414 dprintk(1, "%s() Configuring channel [%s]\n", __FUNCTION__
,
418 bpl
= (bpl
+ 7) & ~7; /* alignment */
420 lines
= ch
->fifo_size
/ bpl
;
425 cx_write(8 + 0, cpu_to_le32(RISC_JUMP
| RISC_IRQ1
| RISC_CNT_INC
) );
426 cx_write(8 + 4, cpu_to_le32(8) );
427 cx_write(8 + 8, cpu_to_le32(0) );
430 for (i
= 0; i
< lines
; i
++) {
431 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __FUNCTION__
, cdt
+ 16*i
,
432 ch
->fifo_start
+ bpl
*i
);
433 cx_write(cdt
+ 16*i
, ch
->fifo_start
+ bpl
*i
);
434 cx_write(cdt
+ 16*i
+ 4, 0);
435 cx_write(cdt
+ 16*i
+ 8, 0);
436 cx_write(cdt
+ 16*i
+ 12, 0);
441 cx_write(ch
->cmds_start
+ 0, 8);
443 cx_write(ch
->cmds_start
+ 0, risc
);
444 cx_write(ch
->cmds_start
+ 4, 0); /* 64 bits 63-32 */
445 cx_write(ch
->cmds_start
+ 8, cdt
);
446 cx_write(ch
->cmds_start
+ 12, (lines
*16) >> 3);
447 cx_write(ch
->cmds_start
+ 16, ch
->ctrl_start
);
449 cx_write(ch
->cmds_start
+ 20, 0x80000000 | (64 >> 2) );
451 cx_write(ch
->cmds_start
+ 20, 64 >> 2);
452 for (i
= 24; i
< 80; i
+= 4)
453 cx_write(ch
->cmds_start
+ i
, 0);
456 cx_write(ch
->ptr1_reg
, ch
->fifo_start
);
457 cx_write(ch
->ptr2_reg
, cdt
);
458 cx_write(ch
->cnt2_reg
, (lines
*16) >> 3);
459 cx_write(ch
->cnt1_reg
, (bpl
>> 3) -1);
461 dprintk(2,"[bridge %d] sram setup %s: bpl=%d lines=%d\n",
470 void cx23885_sram_channel_dump(struct cx23885_dev
*dev
,
471 struct sram_channel
*ch
)
473 static char *name
[] = {
490 unsigned int i
, j
, n
;
492 printk("%s: %s - dma channel status dump\n",
493 dev
->name
, ch
->name
);
494 for (i
= 0; i
< ARRAY_SIZE(name
); i
++)
495 printk("%s: cmds: %-15s: 0x%08x\n",
497 cx_read(ch
->cmds_start
+ 4*i
));
499 for (i
= 0; i
< 4; i
++) {
500 risc
= cx_read(ch
->cmds_start
+ 4 * (i
+ 14));
501 printk("%s: risc%d: ", dev
->name
, i
);
502 cx23885_risc_decode(risc
);
504 for (i
= 0; i
< (64 >> 2); i
+= n
) {
505 risc
= cx_read(ch
->ctrl_start
+ 4 * i
);
506 /* No consideration for bits 63-32 */
508 printk("%s: (0x%08x) iq %x: ", dev
->name
,
509 ch
->ctrl_start
+ 4 * i
, i
);
510 n
= cx23885_risc_decode(risc
);
511 for (j
= 1; j
< n
; j
++) {
512 risc
= cx_read(ch
->ctrl_start
+ 4 * (i
+ j
));
513 printk("%s: iq %x: 0x%08x [ arg #%d ]\n",
514 dev
->name
, i
+j
, risc
, j
);
518 printk("%s: fifo: 0x%08x -> 0x%x\n",
519 dev
->name
, ch
->fifo_start
, ch
->fifo_start
+ch
->fifo_size
);
520 printk("%s: ctrl: 0x%08x -> 0x%x\n",
521 dev
->name
, ch
->ctrl_start
, ch
->ctrl_start
+ 6*16);
522 printk("%s: ptr1_reg: 0x%08x\n",
523 dev
->name
, cx_read(ch
->ptr1_reg
));
524 printk("%s: ptr2_reg: 0x%08x\n",
525 dev
->name
, cx_read(ch
->ptr2_reg
));
526 printk("%s: cnt1_reg: 0x%08x\n",
527 dev
->name
, cx_read(ch
->cnt1_reg
));
528 printk("%s: cnt2_reg: 0x%08x\n",
529 dev
->name
, cx_read(ch
->cnt2_reg
));
532 void cx23885_risc_disasm(struct cx23885_tsport
*port
,
533 struct btcx_riscmem
*risc
)
535 struct cx23885_dev
*dev
= port
->dev
;
536 unsigned int i
, j
, n
;
538 printk("%s: risc disasm: %p [dma=0x%08lx]\n",
539 dev
->name
, risc
->cpu
, (unsigned long)risc
->dma
);
540 for (i
= 0; i
< (risc
->size
>> 2); i
+= n
) {
541 printk("%s: %04d: ", dev
->name
, i
);
542 n
= cx23885_risc_decode(risc
->cpu
[i
]);
543 for (j
= 1; j
< n
; j
++)
544 printk("%s: %04d: 0x%08x [ arg #%d ]\n",
545 dev
->name
, i
+ j
, risc
->cpu
[i
+ j
], j
);
546 if (risc
->cpu
[i
] == RISC_JUMP
)
551 void cx23885_shutdown(struct cx23885_dev
*dev
)
553 /* disable RISC controller */
554 cx_write(DEV_CNTRL2
, 0);
556 /* Disable all IR activity */
557 cx_write(IR_CNTRL_REG
, 0);
559 /* Disable Video A/B activity */
560 cx_write(VID_A_DMA_CTL
, 0);
561 cx_write(VID_B_DMA_CTL
, 0);
562 cx_write(VID_C_DMA_CTL
, 0);
564 /* Disable Audio activity */
565 cx_write(AUD_INT_DMA_CTL
, 0);
566 cx_write(AUD_EXT_DMA_CTL
, 0);
568 /* Disable Serial port */
569 cx_write(UART_CTL
, 0);
571 /* Disable Interrupts */
572 cx_write(PCI_INT_MSK
, 0);
573 cx_write(VID_A_INT_MSK
, 0);
574 cx_write(VID_B_INT_MSK
, 0);
575 cx_write(VID_C_INT_MSK
, 0);
576 cx_write(AUDIO_INT_INT_MSK
, 0);
577 cx_write(AUDIO_EXT_INT_MSK
, 0);
581 void cx23885_reset(struct cx23885_dev
*dev
)
583 dprintk(1, "%s()\n", __FUNCTION__
);
585 cx23885_shutdown(dev
);
587 cx_write(PCI_INT_STAT
, 0xffffffff);
588 cx_write(VID_A_INT_STAT
, 0xffffffff);
589 cx_write(VID_B_INT_STAT
, 0xffffffff);
590 cx_write(VID_C_INT_STAT
, 0xffffffff);
591 cx_write(AUDIO_INT_INT_STAT
, 0xffffffff);
592 cx_write(AUDIO_EXT_INT_STAT
, 0xffffffff);
593 cx_write(CLK_DELAY
, cx_read(CLK_DELAY
) & 0x80000000);
597 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[ SRAM_CH01
], 188*4, 0);
598 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[ SRAM_CH02
], 128, 0);
599 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[ SRAM_CH03
], 188*4, 0);
600 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[ SRAM_CH04
], 128, 0);
601 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[ SRAM_CH05
], 128, 0);
602 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[ SRAM_CH06
], 188*4, 0);
603 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[ SRAM_CH07
], 128, 0);
604 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[ SRAM_CH08
], 128, 0);
605 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[ SRAM_CH09
], 128, 0);
607 cx23885_gpio_setup(dev
);
611 static int cx23885_pci_quirks(struct cx23885_dev
*dev
)
613 dprintk(1, "%s()\n", __FUNCTION__
);
615 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
616 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
617 * occur on the cx23887 bridge.
619 if(dev
->bridge
== CX23885_BRIDGE_885
)
620 cx_clear(RDR_TLCTL0
, 1 << 4);
625 static int get_resources(struct cx23885_dev
*dev
)
627 if (request_mem_region(pci_resource_start(dev
->pci
,0),
628 pci_resource_len(dev
->pci
,0),
632 printk(KERN_ERR
"%s: can't get MMIO memory @ 0x%llx\n",
633 dev
->name
, (unsigned long long)pci_resource_start(dev
->pci
,0));
638 static void cx23885_timeout(unsigned long data
);
639 int cx23885_risc_stopper(struct pci_dev
*pci
, struct btcx_riscmem
*risc
,
640 u32 reg
, u32 mask
, u32 value
);
642 static int cx23885_init_tsport(struct cx23885_dev
*dev
, struct cx23885_tsport
*port
, int portno
)
644 dprintk(1, "%s(portno=%d)\n", __FUNCTION__
, portno
);
646 /* Transport bus init dma queue - Common settings */
647 port
->dma_ctl_val
= 0x11; /* Enable RISC controller and Fifo */
648 port
->ts_int_msk_val
= 0x1111; /* TS port bits for RISC */
650 spin_lock_init(&port
->slock
);
654 INIT_LIST_HEAD(&port
->mpegq
.active
);
655 INIT_LIST_HEAD(&port
->mpegq
.queued
);
656 port
->mpegq
.timeout
.function
= cx23885_timeout
;
657 port
->mpegq
.timeout
.data
= (unsigned long)port
;
658 init_timer(&port
->mpegq
.timeout
);
662 port
->reg_gpcnt
= VID_B_GPCNT
;
663 port
->reg_gpcnt_ctl
= VID_B_GPCNT_CTL
;
664 port
->reg_dma_ctl
= VID_B_DMA_CTL
;
665 port
->reg_lngth
= VID_B_LNGTH
;
666 port
->reg_hw_sop_ctrl
= VID_B_HW_SOP_CTL
;
667 port
->reg_gen_ctrl
= VID_B_GEN_CTL
;
668 port
->reg_bd_pkt_status
= VID_B_BD_PKT_STATUS
;
669 port
->reg_sop_status
= VID_B_SOP_STATUS
;
670 port
->reg_fifo_ovfl_stat
= VID_B_FIFO_OVFL_STAT
;
671 port
->reg_vld_misc
= VID_B_VLD_MISC
;
672 port
->reg_ts_clk_en
= VID_B_TS_CLK_EN
;
673 port
->reg_src_sel
= VID_B_SRC_SEL
;
674 port
->reg_ts_int_msk
= VID_B_INT_MSK
;
675 port
->reg_ts_int_stat
= VID_B_INT_STAT
;
676 port
->sram_chno
= SRAM_CH03
; /* VID_B */
677 port
->pci_irqmask
= 0x02; /* VID_B bit1 */
680 port
->reg_gpcnt
= VID_C_GPCNT
;
681 port
->reg_gpcnt_ctl
= VID_C_GPCNT_CTL
;
682 port
->reg_dma_ctl
= VID_C_DMA_CTL
;
683 port
->reg_lngth
= VID_C_LNGTH
;
684 port
->reg_hw_sop_ctrl
= VID_C_HW_SOP_CTL
;
685 port
->reg_gen_ctrl
= VID_C_GEN_CTL
;
686 port
->reg_bd_pkt_status
= VID_C_BD_PKT_STATUS
;
687 port
->reg_sop_status
= VID_C_SOP_STATUS
;
688 port
->reg_fifo_ovfl_stat
= VID_C_FIFO_OVFL_STAT
;
689 port
->reg_vld_misc
= VID_C_VLD_MISC
;
690 port
->reg_ts_clk_en
= VID_C_TS_CLK_EN
;
691 port
->reg_src_sel
= 0;
692 port
->reg_ts_int_msk
= VID_C_INT_MSK
;
693 port
->reg_ts_int_stat
= VID_C_INT_STAT
;
694 port
->sram_chno
= SRAM_CH06
; /* VID_C */
695 port
->pci_irqmask
= 0x04; /* VID_C bit2 */
701 cx23885_risc_stopper(dev
->pci
, &port
->mpegq
.stopper
,
702 port
->reg_dma_ctl
, port
->dma_ctl_val
, 0x00);
707 static int cx23885_dev_setup(struct cx23885_dev
*dev
)
711 mutex_init(&dev
->lock
);
713 atomic_inc(&dev
->refcount
);
715 dev
->nr
= cx23885_devcount
++;
716 sprintf(dev
->name
, "cx23885[%d]", dev
->nr
);
718 mutex_lock(&devlist
);
719 list_add_tail(&dev
->devlist
, &cx23885_devlist
);
720 mutex_unlock(&devlist
);
722 /* Configure the internal memory */
723 if(dev
->pci
->device
== 0x8880) {
724 dev
->bridge
= CX23885_BRIDGE_887
;
725 dev
->sram_channels
= cx23887_sram_channels
;
727 if(dev
->pci
->device
== 0x8852) {
728 dev
->bridge
= CX23885_BRIDGE_885
;
729 dev
->sram_channels
= cx23885_sram_channels
;
733 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
734 __FUNCTION__
, dev
->bridge
);
738 if (card
[dev
->nr
] < cx23885_bcount
)
739 dev
->board
= card
[dev
->nr
];
740 for (i
= 0; UNSET
== dev
->board
&& i
< cx23885_idcount
; i
++)
741 if (dev
->pci
->subsystem_vendor
== cx23885_subids
[i
].subvendor
&&
742 dev
->pci
->subsystem_device
== cx23885_subids
[i
].subdevice
)
743 dev
->board
= cx23885_subids
[i
].card
;
744 if (UNSET
== dev
->board
) {
745 dev
->board
= CX23885_BOARD_UNKNOWN
;
746 cx23885_card_list(dev
);
749 dev
->pci_bus
= dev
->pci
->bus
->number
;
750 dev
->pci_slot
= PCI_SLOT(dev
->pci
->devfn
);
751 dev
->pci_irqmask
= 0x001f00;
753 /* External Master 1 Bus */
754 dev
->i2c_bus
[0].nr
= 0;
755 dev
->i2c_bus
[0].dev
= dev
;
756 dev
->i2c_bus
[0].reg_stat
= I2C1_STAT
;
757 dev
->i2c_bus
[0].reg_ctrl
= I2C1_CTRL
;
758 dev
->i2c_bus
[0].reg_addr
= I2C1_ADDR
;
759 dev
->i2c_bus
[0].reg_rdata
= I2C1_RDATA
;
760 dev
->i2c_bus
[0].reg_wdata
= I2C1_WDATA
;
761 dev
->i2c_bus
[0].i2c_period
= (0x9d << 24); /* 100kHz */
763 /* External Master 2 Bus */
764 dev
->i2c_bus
[1].nr
= 1;
765 dev
->i2c_bus
[1].dev
= dev
;
766 dev
->i2c_bus
[1].reg_stat
= I2C2_STAT
;
767 dev
->i2c_bus
[1].reg_ctrl
= I2C2_CTRL
;
768 dev
->i2c_bus
[1].reg_addr
= I2C2_ADDR
;
769 dev
->i2c_bus
[1].reg_rdata
= I2C2_RDATA
;
770 dev
->i2c_bus
[1].reg_wdata
= I2C2_WDATA
;
771 dev
->i2c_bus
[1].i2c_period
= (0x9d << 24); /* 100kHz */
773 /* Internal Master 3 Bus */
774 dev
->i2c_bus
[2].nr
= 2;
775 dev
->i2c_bus
[2].dev
= dev
;
776 dev
->i2c_bus
[2].reg_stat
= I2C3_STAT
;
777 dev
->i2c_bus
[2].reg_ctrl
= I2C3_CTRL
;
778 dev
->i2c_bus
[2].reg_addr
= I2C3_ADDR
;
779 dev
->i2c_bus
[2].reg_rdata
= I2C3_RDATA
;
780 dev
->i2c_bus
[2].reg_wdata
= I2C3_WDATA
;
781 dev
->i2c_bus
[2].i2c_period
= (0x07 << 24); /* 1.95MHz */
783 if(cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
)
784 cx23885_init_tsport(dev
, &dev
->ts1
, 1);
786 if(cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
)
787 cx23885_init_tsport(dev
, &dev
->ts2
, 2);
789 if (get_resources(dev
) < 0) {
790 printk(KERN_ERR
"CORE %s No more PCIe resources for "
791 "subsystem: %04x:%04x\n",
792 dev
->name
, dev
->pci
->subsystem_vendor
,
793 dev
->pci
->subsystem_device
);
800 dev
->lmmio
= ioremap(pci_resource_start(dev
->pci
,0),
801 pci_resource_len(dev
->pci
,0));
803 dev
->bmmio
= (u8 __iomem
*)dev
->lmmio
;
805 printk(KERN_INFO
"CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
806 dev
->name
, dev
->pci
->subsystem_vendor
,
807 dev
->pci
->subsystem_device
, cx23885_boards
[dev
->board
].name
,
808 dev
->board
, card
[dev
->nr
] == dev
->board
?
809 "insmod option" : "autodetected");
811 cx23885_pci_quirks(dev
);
816 cx23885_i2c_register(&dev
->i2c_bus
[0]);
817 cx23885_i2c_register(&dev
->i2c_bus
[1]);
818 cx23885_i2c_register(&dev
->i2c_bus
[2]);
819 cx23885_call_i2c_clients (&dev
->i2c_bus
[0], TUNER_SET_STANDBY
, NULL
);
820 cx23885_card_setup(dev
);
821 cx23885_ir_init(dev
);
823 if(cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
) {
824 if (cx23885_dvb_register(&dev
->ts1
) < 0) {
825 printk(KERN_ERR
"%s() Failed to register dvb adapters on VID_B\n",
830 if(cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
) {
831 if (cx23885_dvb_register(&dev
->ts2
) < 0) {
832 printk(KERN_ERR
"%s() Failed to register dvb adapters on VID_C\n",
844 void cx23885_dev_unregister(struct cx23885_dev
*dev
)
846 release_mem_region(pci_resource_start(dev
->pci
,0),
847 pci_resource_len(dev
->pci
,0));
849 if (!atomic_dec_and_test(&dev
->refcount
))
852 if(cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
)
853 cx23885_dvb_unregister(&dev
->ts1
);
855 if(cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
)
856 cx23885_dvb_unregister(&dev
->ts2
);
858 cx23885_i2c_unregister(&dev
->i2c_bus
[2]);
859 cx23885_i2c_unregister(&dev
->i2c_bus
[1]);
860 cx23885_i2c_unregister(&dev
->i2c_bus
[0]);
865 static u32
* cx23885_risc_field(u32
*rp
, struct scatterlist
*sglist
,
866 unsigned int offset
, u32 sync_line
,
867 unsigned int bpl
, unsigned int padding
,
870 struct scatterlist
*sg
;
871 unsigned int line
, todo
;
873 /* sync instruction */
874 if (sync_line
!= NO_SYNC_LINE
)
875 *(rp
++) = cpu_to_le32(RISC_RESYNC
| sync_line
);
879 for (line
= 0; line
< lines
; line
++) {
880 while (offset
&& offset
>= sg_dma_len(sg
)) {
881 offset
-= sg_dma_len(sg
);
884 if (bpl
<= sg_dma_len(sg
)-offset
) {
885 /* fits into current chunk */
886 *(rp
++)=cpu_to_le32(RISC_WRITE
|RISC_SOL
|RISC_EOL
|bpl
);
887 *(rp
++)=cpu_to_le32(sg_dma_address(sg
)+offset
);
888 *(rp
++)=cpu_to_le32(0); /* bits 63-32 */
891 /* scanline needs to be split */
893 *(rp
++)=cpu_to_le32(RISC_WRITE
|RISC_SOL
|
894 (sg_dma_len(sg
)-offset
));
895 *(rp
++)=cpu_to_le32(sg_dma_address(sg
)+offset
);
896 *(rp
++)=cpu_to_le32(0); /* bits 63-32 */
897 todo
-= (sg_dma_len(sg
)-offset
);
900 while (todo
> sg_dma_len(sg
)) {
901 *(rp
++)=cpu_to_le32(RISC_WRITE
|
903 *(rp
++)=cpu_to_le32(sg_dma_address(sg
));
904 *(rp
++)=cpu_to_le32(0); /* bits 63-32 */
905 todo
-= sg_dma_len(sg
);
908 *(rp
++)=cpu_to_le32(RISC_WRITE
|RISC_EOL
|todo
);
909 *(rp
++)=cpu_to_le32(sg_dma_address(sg
));
910 *(rp
++)=cpu_to_le32(0); /* bits 63-32 */
919 int cx23885_risc_buffer(struct pci_dev
*pci
, struct btcx_riscmem
*risc
,
920 struct scatterlist
*sglist
, unsigned int top_offset
,
921 unsigned int bottom_offset
, unsigned int bpl
,
922 unsigned int padding
, unsigned int lines
)
924 u32 instructions
, fields
;
929 if (UNSET
!= top_offset
)
931 if (UNSET
!= bottom_offset
)
934 /* estimate risc mem: worst case is one write per page border +
935 one write per scan line + syncs + jump (all 2 dwords). Padding
936 can cause next bpl to start close to a page border. First DMA
937 region may be smaller than PAGE_SIZE */
938 /* write and jump need and extra dword */
939 instructions
= fields
* (1 + ((bpl
+ padding
) * lines
) / PAGE_SIZE
+ lines
);
941 if ((rc
= btcx_riscmem_alloc(pci
,risc
,instructions
*12)) < 0)
944 /* write risc instructions */
946 if (UNSET
!= top_offset
)
947 rp
= cx23885_risc_field(rp
, sglist
, top_offset
, 0,
948 bpl
, padding
, lines
);
949 if (UNSET
!= bottom_offset
)
950 rp
= cx23885_risc_field(rp
, sglist
, bottom_offset
, 0x200,
951 bpl
, padding
, lines
);
953 /* save pointer to jmp instruction address */
955 BUG_ON((risc
->jmp
- risc
->cpu
+ 2) * sizeof (*risc
->cpu
) > risc
->size
);
959 int cx23885_risc_databuffer(struct pci_dev
*pci
, struct btcx_riscmem
*risc
,
960 struct scatterlist
*sglist
, unsigned int bpl
,
967 /* estimate risc mem: worst case is one write per page border +
968 one write per scan line + syncs + jump (all 2 dwords). Here
969 there is no padding and no sync. First DMA region may be smaller
971 /* Jump and write need an extra dword */
972 instructions
= 1 + (bpl
* lines
) / PAGE_SIZE
+ lines
;
975 if ((rc
= btcx_riscmem_alloc(pci
,risc
,instructions
*12)) < 0)
978 /* write risc instructions */
980 rp
= cx23885_risc_field(rp
, sglist
, 0, NO_SYNC_LINE
, bpl
, 0, lines
);
982 /* save pointer to jmp instruction address */
984 BUG_ON((risc
->jmp
- risc
->cpu
+ 2) * sizeof (*risc
->cpu
) > risc
->size
);
988 int cx23885_risc_stopper(struct pci_dev
*pci
, struct btcx_riscmem
*risc
,
989 u32 reg
, u32 mask
, u32 value
)
994 if ((rc
= btcx_riscmem_alloc(pci
, risc
, 4*16)) < 0)
997 /* write risc instructions */
999 *(rp
++) = cpu_to_le32(RISC_WRITECR
| RISC_IRQ2
);
1000 *(rp
++) = cpu_to_le32(reg
);
1001 *(rp
++) = cpu_to_le32(value
);
1002 *(rp
++) = cpu_to_le32(mask
);
1003 *(rp
++) = cpu_to_le32(RISC_JUMP
);
1004 *(rp
++) = cpu_to_le32(risc
->dma
);
1005 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
1009 void cx23885_free_buffer(struct videobuf_queue
*q
, struct cx23885_buffer
*buf
)
1011 struct videobuf_dmabuf
*dma
= videobuf_to_dma(&buf
->vb
);
1013 BUG_ON(in_interrupt());
1014 videobuf_waiton(&buf
->vb
, 0, 0);
1015 videobuf_dma_unmap(q
, dma
);
1016 videobuf_dma_free(dma
);
1017 btcx_riscmem_free((struct pci_dev
*)q
->dev
, &buf
->risc
);
1018 buf
->vb
.state
= STATE_NEEDS_INIT
;
1021 static int cx23885_start_dma(struct cx23885_tsport
*port
,
1022 struct cx23885_dmaqueue
*q
,
1023 struct cx23885_buffer
*buf
)
1025 struct cx23885_dev
*dev
= port
->dev
;
1027 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __FUNCTION__
,
1028 buf
->vb
.width
, buf
->vb
.height
, buf
->vb
.field
);
1030 /* setup fifo + format */
1031 cx23885_sram_channel_setup(dev
,
1032 &dev
->sram_channels
[ port
->sram_chno
],
1033 port
->ts_packet_size
, buf
->risc
.dma
);
1035 cx23885_sram_channel_dump(dev
, &dev
->sram_channels
[ port
->sram_chno
] );
1036 cx23885_risc_disasm(port
, &buf
->risc
);
1039 /* write TS length to chip */
1040 cx_write(port
->reg_lngth
, buf
->vb
.width
);
1042 if ( (!(cx23885_boards
[dev
->board
].portb
& CX23885_MPEG_DVB
)) &&
1043 (!(cx23885_boards
[dev
->board
].portc
& CX23885_MPEG_DVB
)) ) {
1044 printk( "%s() Failed. Unsupported value in .portb/c (0x%08x)/(0x%08x)\n",
1046 cx23885_boards
[dev
->board
].portb
,
1047 cx23885_boards
[dev
->board
].portc
);
1053 /* If the port supports SRC SELECT, configure it */
1054 if(port
->reg_src_sel
)
1055 cx_write(port
->reg_src_sel
, port
->src_sel_val
);
1057 cx_write(port
->reg_hw_sop_ctrl
, 0x47 << 16 | 188 << 4);
1058 cx_write(port
->reg_ts_clk_en
, port
->ts_clk_en_val
);
1059 cx_write(port
->reg_vld_misc
, 0x00);
1060 cx_write(port
->reg_gen_ctrl
, port
->gen_ctrl_val
);
1063 // NOTE: this is 2 (reserved) for portb, does it matter?
1064 /* reset counter to zero */
1065 cx_write(port
->reg_gpcnt_ctl
, 3);
1068 switch(dev
->bridge
) {
1069 case CX23885_BRIDGE_885
:
1070 case CX23885_BRIDGE_887
:
1072 dprintk(1, "%s() enabling TS int's and DMA\n", __FUNCTION__
);
1073 cx_set(port
->reg_ts_int_msk
, port
->ts_int_msk_val
);
1074 cx_set(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1075 cx_set(PCI_INT_MSK
, dev
->pci_irqmask
| port
->pci_irqmask
);
1081 cx_set(DEV_CNTRL2
, (1<<5)); /* Enable RISC controller */
1086 static int cx23885_stop_dma(struct cx23885_tsport
*port
)
1088 struct cx23885_dev
*dev
= port
->dev
;
1089 dprintk(1, "%s()\n", __FUNCTION__
);
1091 /* Stop interrupts and DMA */
1092 cx_clear(port
->reg_ts_int_msk
, port
->ts_int_msk_val
);
1093 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1098 static int cx23885_restart_queue(struct cx23885_tsport
*port
,
1099 struct cx23885_dmaqueue
*q
)
1101 struct cx23885_dev
*dev
= port
->dev
;
1102 struct cx23885_buffer
*buf
;
1104 dprintk(5, "%s()\n", __FUNCTION__
);
1105 if (list_empty(&q
->active
))
1107 struct cx23885_buffer
*prev
;
1110 dprintk(5, "%s() queue is empty\n", __FUNCTION__
);
1113 if (list_empty(&q
->queued
))
1115 buf
= list_entry(q
->queued
.next
, struct cx23885_buffer
,
1118 list_del(&buf
->vb
.queue
);
1119 list_add_tail(&buf
->vb
.queue
, &q
->active
);
1120 cx23885_start_dma(port
, q
, buf
);
1121 buf
->vb
.state
= STATE_ACTIVE
;
1122 buf
->count
= q
->count
++;
1123 mod_timer(&q
->timeout
, jiffies
+BUFFER_TIMEOUT
);
1124 dprintk(5, "[%p/%d] restart_queue - first active\n",
1127 } else if (prev
->vb
.width
== buf
->vb
.width
&&
1128 prev
->vb
.height
== buf
->vb
.height
&&
1129 prev
->fmt
== buf
->fmt
) {
1130 list_del(&buf
->vb
.queue
);
1131 list_add_tail(&buf
->vb
.queue
, &q
->active
);
1132 buf
->vb
.state
= STATE_ACTIVE
;
1133 buf
->count
= q
->count
++;
1134 prev
->risc
.jmp
[1] = cpu_to_le32(buf
->risc
.dma
);
1135 prev
->risc
.jmp
[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1136 dprintk(5,"[%p/%d] restart_queue - move to active\n",
1146 buf
= list_entry(q
->active
.next
, struct cx23885_buffer
, vb
.queue
);
1147 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1149 cx23885_start_dma(port
, q
, buf
);
1150 list_for_each_entry(buf
, &q
->active
, vb
.queue
)
1151 buf
->count
= q
->count
++;
1152 mod_timer(&q
->timeout
, jiffies
+ BUFFER_TIMEOUT
);
1156 /* ------------------------------------------------------------------ */
1158 int cx23885_buf_prepare(struct videobuf_queue
*q
, struct cx23885_tsport
*port
,
1159 struct cx23885_buffer
*buf
, enum v4l2_field field
)
1161 struct cx23885_dev
*dev
= port
->dev
;
1162 int size
= port
->ts_packet_size
* port
->ts_packet_count
;
1165 dprintk(1, "%s: %p\n", __FUNCTION__
, buf
);
1166 if (0 != buf
->vb
.baddr
&& buf
->vb
.bsize
< size
)
1169 if (STATE_NEEDS_INIT
== buf
->vb
.state
) {
1170 buf
->vb
.width
= port
->ts_packet_size
;
1171 buf
->vb
.height
= port
->ts_packet_count
;
1172 buf
->vb
.size
= size
;
1173 buf
->vb
.field
= field
/*V4L2_FIELD_TOP*/;
1175 if (0 != (rc
= videobuf_iolock(q
, &buf
->vb
, NULL
)))
1177 cx23885_risc_databuffer(dev
->pci
, &buf
->risc
,
1178 videobuf_to_dma(&buf
->vb
)->sglist
,
1179 buf
->vb
.width
, buf
->vb
.height
);
1181 buf
->vb
.state
= STATE_PREPARED
;
1185 cx23885_free_buffer(q
, buf
);
1189 void cx23885_buf_queue(struct cx23885_tsport
*port
, struct cx23885_buffer
*buf
)
1191 struct cx23885_buffer
*prev
;
1192 struct cx23885_dev
*dev
= port
->dev
;
1193 struct cx23885_dmaqueue
*cx88q
= &port
->mpegq
;
1195 /* add jump to stopper */
1196 buf
->risc
.jmp
[0] = cpu_to_le32(RISC_JUMP
| RISC_IRQ1
| RISC_CNT_INC
);
1197 buf
->risc
.jmp
[1] = cpu_to_le32(cx88q
->stopper
.dma
);
1198 buf
->risc
.jmp
[2] = cpu_to_le32(0); /* bits 63-32 */
1200 if (list_empty(&cx88q
->active
)) {
1201 dprintk( 1, "queue is empty - first active\n" );
1202 list_add_tail(&buf
->vb
.queue
, &cx88q
->active
);
1203 cx23885_start_dma(port
, cx88q
, buf
);
1204 buf
->vb
.state
= STATE_ACTIVE
;
1205 buf
->count
= cx88q
->count
++;
1206 mod_timer(&cx88q
->timeout
, jiffies
+ BUFFER_TIMEOUT
);
1207 dprintk(1, "[%p/%d] %s - first active\n",
1208 buf
, buf
->vb
.i
, __FUNCTION__
);
1210 dprintk( 1, "queue is not empty - append to active\n" );
1211 prev
= list_entry(cx88q
->active
.prev
, struct cx23885_buffer
,
1213 list_add_tail(&buf
->vb
.queue
, &cx88q
->active
);
1214 buf
->vb
.state
= STATE_ACTIVE
;
1215 buf
->count
= cx88q
->count
++;
1216 prev
->risc
.jmp
[1] = cpu_to_le32(buf
->risc
.dma
);
1217 prev
->risc
.jmp
[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1218 dprintk( 1, "[%p/%d] %s - append to active\n",
1219 buf
, buf
->vb
.i
, __FUNCTION__
);
1223 /* ----------------------------------------------------------- */
1225 static void do_cancel_buffers(struct cx23885_tsport
*port
, char *reason
,
1228 struct cx23885_dev
*dev
= port
->dev
;
1229 struct cx23885_dmaqueue
*q
= &port
->mpegq
;
1230 struct cx23885_buffer
*buf
;
1231 unsigned long flags
;
1233 spin_lock_irqsave(&port
->slock
, flags
);
1234 while (!list_empty(&q
->active
)) {
1235 buf
= list_entry(q
->active
.next
, struct cx23885_buffer
,
1237 list_del(&buf
->vb
.queue
);
1238 buf
->vb
.state
= STATE_ERROR
;
1239 wake_up(&buf
->vb
.done
);
1240 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1241 buf
, buf
->vb
.i
, reason
, (unsigned long)buf
->risc
.dma
);
1244 dprintk(1, "restarting queue\n" );
1245 cx23885_restart_queue(port
, q
);
1247 spin_unlock_irqrestore(&port
->slock
, flags
);
1250 void cx23885_cancel_buffers(struct cx23885_tsport
*port
)
1252 struct cx23885_dev
*dev
= port
->dev
;
1253 struct cx23885_dmaqueue
*q
= &port
->mpegq
;
1255 dprintk(1, "%s()\n", __FUNCTION__
);
1256 del_timer_sync(&q
->timeout
);
1257 cx23885_stop_dma(port
);
1258 do_cancel_buffers(port
, "cancel", 0);
1261 static void cx23885_timeout(unsigned long data
)
1263 struct cx23885_tsport
*port
= (struct cx23885_tsport
*)data
;
1264 struct cx23885_dev
*dev
= port
->dev
;
1266 dprintk(1, "%s()\n",__FUNCTION__
);
1269 cx23885_sram_channel_dump(dev
, &dev
->sram_channels
[ port
->sram_chno
]);
1271 cx23885_stop_dma(port
);
1272 do_cancel_buffers(port
, "timeout", 1);
1275 static int cx23885_irq_ts(struct cx23885_tsport
*port
, u32 status
)
1277 struct cx23885_dev
*dev
= port
->dev
;
1281 if ( (status
& VID_BC_MSK_OPC_ERR
) ||
1282 (status
& VID_BC_MSK_BAD_PKT
) ||
1283 (status
& VID_BC_MSK_SYNC
) ||
1284 (status
& VID_BC_MSK_OF
))
1286 if (status
& VID_BC_MSK_OPC_ERR
)
1287 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n", VID_BC_MSK_OPC_ERR
);
1288 if (status
& VID_BC_MSK_BAD_PKT
)
1289 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n", VID_BC_MSK_BAD_PKT
);
1290 if (status
& VID_BC_MSK_SYNC
)
1291 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n", VID_BC_MSK_SYNC
);
1292 if (status
& VID_BC_MSK_OF
)
1293 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n", VID_BC_MSK_OF
);
1295 printk(KERN_ERR
"%s: mpeg risc op code error\n", dev
->name
);
1297 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1298 cx23885_sram_channel_dump(dev
, &dev
->sram_channels
[ port
->sram_chno
]);
1300 } else if (status
& VID_BC_MSK_RISCI1
) {
1302 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1
);
1304 spin_lock(&port
->slock
);
1305 count
= cx_read(port
->reg_gpcnt
);
1306 cx23885_wakeup(port
, &port
->mpegq
, count
);
1307 spin_unlock(&port
->slock
);
1309 } else if (status
& VID_BC_MSK_RISCI2
) {
1311 dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2
);
1313 spin_lock(&port
->slock
);
1314 cx23885_restart_queue(port
, &port
->mpegq
);
1315 spin_unlock(&port
->slock
);
1319 cx_write(port
->reg_ts_int_stat
, status
);
1326 static irqreturn_t
cx23885_irq(int irq
, void *dev_id
)
1328 struct cx23885_dev
*dev
= dev_id
;
1329 struct cx23885_tsport
*ts1
= &dev
->ts1
;
1330 struct cx23885_tsport
*ts2
= &dev
->ts2
;
1331 u32 pci_status
, pci_mask
;
1332 u32 ts1_status
, ts1_mask
;
1333 u32 ts2_status
, ts2_mask
;
1334 int ts1_count
= 0, ts2_count
= 0, handled
= 0;
1336 pci_status
= cx_read(PCI_INT_STAT
);
1337 pci_mask
= cx_read(PCI_INT_MSK
);
1338 ts1_status
= cx_read(VID_B_INT_STAT
);
1339 ts1_mask
= cx_read(VID_B_INT_MSK
);
1340 ts2_status
= cx_read(VID_C_INT_STAT
);
1341 ts2_mask
= cx_read(VID_C_INT_MSK
);
1343 if ( (pci_status
== 0) && (ts2_status
== 0) && (ts1_status
== 0) )
1346 ts1_count
= cx_read(ts1
->reg_gpcnt
);
1347 ts2_count
= cx_read(ts2
->reg_gpcnt
);
1348 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n", pci_status
, pci_mask
);
1349 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n", ts1_status
, ts1_mask
, ts1_count
);
1350 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n", ts2_status
, ts2_mask
, ts2_count
);
1352 if ( (pci_status
& PCI_MSK_RISC_RD
) ||
1353 (pci_status
& PCI_MSK_RISC_WR
) ||
1354 (pci_status
& PCI_MSK_AL_RD
) ||
1355 (pci_status
& PCI_MSK_AL_WR
) ||
1356 (pci_status
& PCI_MSK_APB_DMA
) ||
1357 (pci_status
& PCI_MSK_VID_C
) ||
1358 (pci_status
& PCI_MSK_VID_B
) ||
1359 (pci_status
& PCI_MSK_VID_A
) ||
1360 (pci_status
& PCI_MSK_AUD_INT
) ||
1361 (pci_status
& PCI_MSK_AUD_EXT
) )
1364 if (pci_status
& PCI_MSK_RISC_RD
)
1365 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n", PCI_MSK_RISC_RD
);
1366 if (pci_status
& PCI_MSK_RISC_WR
)
1367 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n", PCI_MSK_RISC_WR
);
1368 if (pci_status
& PCI_MSK_AL_RD
)
1369 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n", PCI_MSK_AL_RD
);
1370 if (pci_status
& PCI_MSK_AL_WR
)
1371 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n", PCI_MSK_AL_WR
);
1372 if (pci_status
& PCI_MSK_APB_DMA
)
1373 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n", PCI_MSK_APB_DMA
);
1374 if (pci_status
& PCI_MSK_VID_C
)
1375 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n", PCI_MSK_VID_C
);
1376 if (pci_status
& PCI_MSK_VID_B
)
1377 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n", PCI_MSK_VID_B
);
1378 if (pci_status
& PCI_MSK_VID_A
)
1379 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n", PCI_MSK_VID_A
);
1380 if (pci_status
& PCI_MSK_AUD_INT
)
1381 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n", PCI_MSK_AUD_INT
);
1382 if (pci_status
& PCI_MSK_AUD_EXT
)
1383 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n", PCI_MSK_AUD_EXT
);
1388 handled
+= cx23885_irq_ts(ts1
, ts1_status
);
1391 handled
+= cx23885_irq_ts(ts2
, ts2_status
);
1394 cx_write(PCI_INT_STAT
, pci_status
);
1396 return IRQ_RETVAL(handled
);
1399 static int __devinit
cx23885_initdev(struct pci_dev
*pci_dev
,
1400 const struct pci_device_id
*pci_id
)
1402 struct cx23885_dev
*dev
;
1405 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
1411 if (pci_enable_device(pci_dev
)) {
1416 if (cx23885_dev_setup(dev
) < 0) {
1421 /* print pci info */
1422 pci_read_config_byte(pci_dev
, PCI_CLASS_REVISION
, &dev
->pci_rev
);
1423 pci_read_config_byte(pci_dev
, PCI_LATENCY_TIMER
, &dev
->pci_lat
);
1424 printk(KERN_INFO
"%s/0: found at %s, rev: %d, irq: %d, "
1425 "latency: %d, mmio: 0x%llx\n", dev
->name
,
1426 pci_name(pci_dev
), dev
->pci_rev
, pci_dev
->irq
,
1427 dev
->pci_lat
, (unsigned long long)pci_resource_start(pci_dev
,0));
1429 pci_set_master(pci_dev
);
1430 if (!pci_dma_supported(pci_dev
, 0xffffffff)) {
1431 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev
->name
);
1436 err
= request_irq(pci_dev
->irq
, cx23885_irq
,
1437 IRQF_SHARED
| IRQF_DISABLED
, dev
->name
, dev
);
1439 printk(KERN_ERR
"%s: can't get IRQ %d\n",
1440 dev
->name
, pci_dev
->irq
);
1444 pci_set_drvdata(pci_dev
, dev
);
1448 cx23885_dev_unregister(dev
);
1454 static void __devexit
cx23885_finidev(struct pci_dev
*pci_dev
)
1456 struct cx23885_dev
*dev
= pci_get_drvdata(pci_dev
);
1458 cx23885_shutdown(dev
);
1460 pci_disable_device(pci_dev
);
1462 /* unregister stuff */
1463 free_irq(pci_dev
->irq
, dev
);
1464 pci_set_drvdata(pci_dev
, NULL
);
1466 mutex_lock(&devlist
);
1467 list_del(&dev
->devlist
);
1468 mutex_unlock(&devlist
);
1470 cx23885_dev_unregister(dev
);
1474 static struct pci_device_id cx23885_pci_tbl
[] = {
1479 .subvendor
= PCI_ANY_ID
,
1480 .subdevice
= PCI_ANY_ID
,
1485 .subvendor
= PCI_ANY_ID
,
1486 .subdevice
= PCI_ANY_ID
,
1488 /* --- end of list --- */
1491 MODULE_DEVICE_TABLE(pci
, cx23885_pci_tbl
);
1493 static struct pci_driver cx23885_pci_driver
= {
1495 .id_table
= cx23885_pci_tbl
,
1496 .probe
= cx23885_initdev
,
1497 .remove
= __devexit_p(cx23885_finidev
),
1503 static int cx23885_init(void)
1505 printk(KERN_INFO
"cx23885 driver version %d.%d.%d loaded\n",
1506 (CX23885_VERSION_CODE
>> 16) & 0xff,
1507 (CX23885_VERSION_CODE
>> 8) & 0xff,
1508 CX23885_VERSION_CODE
& 0xff);
1510 printk(KERN_INFO
"cx23885: snapshot date %04d-%02d-%02d\n",
1511 SNAPSHOT
/10000, (SNAPSHOT
/100)%100, SNAPSHOT
%100);
1513 return pci_register_driver(&cx23885_pci_driver
);
1516 static void cx23885_fini(void)
1518 pci_unregister_driver(&cx23885_pci_driver
);
1521 module_init(cx23885_init
);
1522 module_exit(cx23885_fini
);
1524 /* ----------------------------------------------------------- */
1529 * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off