/*
 *  Driver for the Conexant CX23885 PCIe bridge
 *
 *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <linux/firmware.h>

#include "cx23885.h"
#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
#include "cx23885-av.h"
#include "cx23885-input.h"
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
	} while (0)
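/*
 * Example: loading the module with "modprobe cx23885 debug=7" enables both
 * the level 1 setup/teardown messages and the level 7 per-interrupt traces
 * used later in this file; the default of 0 silences all dprintk() output.
 */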
static unsigned int cx23885_devcount;

#define NO_SYNC_LINE (-1U)
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 *
 * 1 line    = 16 bytes of CDT
 * cdt size  = 16 * linesize
 *
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */
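/* Illustrative sizing under the rule above (not one of the regions listed):
 * a channel holding 4 lines of 752 bytes (188*4) uses a 3008 byte FIFO and
 * needs 4 * 16 = 64 bytes of CDT.
 */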
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.fifo_start	= 0x5000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.fifo_start	= 0x6000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.fifo_start	= 0x7000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.fifo_start	= 0x3000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.fifo_start	= 0x5000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.fifo_start	= 0x6000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.fifo_start	= 0x7000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}

void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}

void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);

	return v;
}
static int cx23885_risc_decode(u32 risc)
{
	static char *instr[16] = {
		[RISC_SYNC    >> 28] = "sync",
		[RISC_WRITE   >> 28] = "write",
		[RISC_WRITEC  >> 28] = "writec",
		[RISC_READ    >> 28] = "read",
		[RISC_READC   >> 28] = "readc",
		[RISC_JUMP    >> 28] = "jump",
		[RISC_SKIP    >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	static int incr[16] = {
		[RISC_WRITE   >> 28] = 3,
		[RISC_JUMP    >> 28] = 3,
		[RISC_SKIP    >> 28] = 1,
		[RISC_SYNC    >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	static char *bits[] = {
		"12",   "13",   "14",   "resync",
		"cnt0", "cnt1", "18",   "19",
		"20",   "21",   "22",   "23",
		"irq1", "irq2", "eol",  "sol",
	};
	int i;

	printk("0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			printk(" %s", bits[i]);
	printk(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
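/*
 * As the decoder above implies, each RISC instruction word packs the opcode
 * into bits 31-28 (indexing instr[]/incr[]), per-instruction flags such as
 * sol/eol/irq1/resync into bits 27-12 (the bits[] names), and a 12-bit
 * byte/line count in bits 11-0.  The incr[] table gives the total dword
 * length of each instruction, which the disassembler uses to step forward.
 */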
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;

	if (list_empty(&q->active))
		return;
	buf = list_entry(q->active.next,
			 struct cx23885_buffer, queue);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = q->count++;
	dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
		buf->vb.vb2_buf.index,
		count, q->count);
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;

	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);

	/* write CDT */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge, ch->name, bpl, lines);

	return 0;
}
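/*
 * Summary of the CMDS block written above: +0 holds the initial RISC PC
 * (or the address of the jump written at SRAM offset 8 for jump-only
 * channels), +8 the CDT base, +12 the CDT size in qwords, +16 the IQ base
 * and +20 the IQ size, with the remainder of the 80 byte block cleared.
 * ptr1/cnt1 describe the FIFO and ptr2/cnt2 the CDT.
 */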
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}
static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct cx23885_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
	       dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
			       dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);
}
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	return 0;
}
static int get_resources(struct cx23885_dev *dev)
{
	if (request_mem_region(pci_resource_start(dev->pci, 0),
			       pci_resource_len(dev->pci, 0),
			       dev->name))
		return 0;

	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
	       dev->name,
	       (unsigned long long)pci_resource_start(dev->pci, 0));

	return -EBUSY;
}
static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* By default allow a single frontend attachment to this tsport,
	 * keeping the -dvb.c code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
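/*
 * Note on the common settings above: 0x47 is the MPEG-TS sync byte and 188
 * the transport packet length, so hw_sop_ctrl_val feeds the VID_B/C
 * HW_SOP_CTL registers with start-of-packet detection for standard 188 byte
 * TS packets on both ports.
 */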
static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		dev->hwrevision = 0xb1;
		break;
	default:
		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}

	if (dev->hwrevision)
		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
		       __func__, dev->hwrevision);
	else
		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}
/* Find the first v4l2_subdev member of the group id in hw */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *result = NULL;
	struct v4l2_subdev *sd;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id == hw) {
			result = sd;
			break;
		}
	}
	spin_unlock(&dev->v4l2_dev.lock);
	return result;
}
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume a default */
		dev->bridge = CX23885_BRIDGE_887;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 25000000;
		dev->sram_channels = cx23887_sram_channels;
	} else if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	}

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		printk(KERN_ERR "CORE %s No more PCIe resources for "
		       "subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);
		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
	       dev->name, dev->pci->subsystem_vendor,
	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
	       dev->board, card[dev->nr] == dev->board ?
	       "insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus  = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, core, s_power, 0);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * GPIOs 9/8 are input detection bits for the breakout video
		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
		 * these GPIOs are pulled high. Make sure these GPIOs are
		 * marked as inputs.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			printk(KERN_ERR "%s() Failed to register analog "
			       "video adapters on VID_A\n", __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
			       "%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			printk(KERN_ERR
			       "%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
			       "%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;

	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
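/*
 * In short: cx23885_risc_field() emits one RISC_WRITE per scanline when the
 * line fits in the current scatter-gather chunk, and otherwise splits the
 * line into a SOL write, zero or more middle writes, and a final EOL write,
 * advancing through the sglist as each chunk is consumed.  Every write is
 * three dwords (opcode/count, DMA address, upper 32 address bits).
 */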
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);

	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
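/*
 * Worked example of the estimate above (illustrative numbers only): a single
 * field (fields = 1) of 240 lines at bpl = 720 with no padding gives
 * 1 * (1 + (720 * 240) / 4096 + 240) = 283 instructions on a 4 KiB page
 * system, i.e. risc->size = 283 * 12 = 3396 bytes.
 */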
int cx23885_risc_databuffer(struct pci_dev *pci,
			    struct cx23885_riscmem *risc,
			    struct scatterlist *sglist,
			    unsigned int bpl,
			    unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Here
	   there is no padding and no sync.  First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;

	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi, lpi == 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			   struct scatterlist *sglist, unsigned int top_offset,
			   unsigned int bottom_offset, unsigned int bpl,
			   unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);

	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	struct cx23885_riscmem *risc = &buf->risc;

	BUG_ON(in_interrupt());
	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
}
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
int cx23885_start_dma(struct cx23885_tsport *port,
		      struct cx23885_dmaqueue *q,
		      struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
	    (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		cx_write(PAD_CTRL, reg);

		/* FIXME and these two registers should be documented. */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}
static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);

		/* clear TS1_SOP_OE and TS1_OE_HI */
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}
/* ------------------------------------------------------------------ */

int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);

	dprintk(1, "%s: %p\n", __func__, buf);
	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	cx23885_risc_databuffer(dev->pci, &buf->risc,
				sgt->sgl,
				port->ts_packet_size, port->ts_packet_count, 0);
	return 0;
}
/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
 * the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
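/*
 * Sketch of the chaining described above (addresses are per-buffer
 * risc->dma values):
 *
 *   buffer A: [JUMP -> A+12][ writes ... ][ jmp[] -> A+12 ]   (loops on A)
 *   queue B:  A's jmp[1] is rewritten to B's dma, and B's initial JUMP
 *             gains RISC_IRQ1, so completing A raises the interrupt and
 *             DMA continues into B.
 */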
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
/* ----------------------------------------------------------- */

static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.vb2_buf.index, reason,
			(unsigned long)buf->risc.dma);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	u32 count;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT)         ||
	    (status & VID_B_MSK_OPC_ERR)         ||
	    (status & VID_B_MSK_VBI_OPC_ERR)     ||
	    (status & VID_B_MSK_SYNC)            ||
	    (status & VID_B_MSK_VBI_SYNC)        ||
	    (status & VID_B_MSK_OF)              ||
	    (status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	u32 count;
	int handled = 0;

	if ((status & VID_BC_MSK_OPC_ERR) ||
	    (status & VID_BC_MSK_BAD_PKT) ||
	    (status & VID_BC_MSK_SYNC) ||
	    (status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	if (cx23885_boards[dev->board].ci_type == 1 &&
	    (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
	    (pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
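/*
 * Dispatch order used above: read every status/mask register up front, bail
 * out early when PCI, TS1 and TS2 are all quiet, then hand each status word
 * to the CI, TS/417, video, audio and IR handlers before finally
 * acknowledging PCI_INT_STAT.  The AV core interrupt is masked here and
 * deferred to the cx25840_work handler rather than serviced in hard-irq
 * context.
 */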
static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
				    unsigned int notification, void *arg)
{
	struct cx23885_dev *dev;

	if (sd == NULL)
		return;

	dev = to_cx23885(sd->v4l2_dev);

	switch (notification) {
	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd == dev->sd_ir)
			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd == dev->sd_ir)
			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	}
}
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}

static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
/* Mask represents 32 different GPIOs, GPIO's are split into multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder, with its own GPIO's, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
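/*
 * Example mapping (follows directly from the shifts used below): asking for
 * mask bit 5 (GPIO 5) lands on MC417_RWD bit 2 via the ">> 3" adjustment,
 * while mask bits 0-2 are written to GP0_IO unshifted; bits 19-23 are
 * currently reported as unsupported.
 */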
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_set(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Setting GPIO on encoder ports\n",
				dev->name);
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}
void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Clearing GPIO moving on encoder ports\n",
				dev->name);
		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}
void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	if ((mask & 0x00000007) && asoutput)
		cx_set(GP0_IO, (mask & 0x7) << 16);
	else if ((mask & 0x00000007) && !asoutput)
		cx_clear(GP0_IO, (mask & 0x7) << 16);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Enabling GPIO on encoder ports\n",
				dev->name);

		/* MC417_OEN is active low for output, write 1 for an input */
		if ((mask & 0x0007fff8) && asoutput)
			cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);

		else if ((mask & 0x0007fff8) && !asoutput)
			cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
}
static int cx23885_initdev(struct pci_dev *pci_dev,
			   const struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	struct v4l2_ctrl_handler *hdl;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (NULL == dev)
		return -ENOMEM;

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	hdl = &dev->ctrl_handler;
	v4l2_ctrl_handler_init(hdl, 6);
	if (hdl->error) {
		err = hdl->error;
		goto fail_ctrl;
	}
	dev->v4l2_dev.ctrl_handler = hdl;

	/* Prepare to handle notifications from subdevices */
	cx23885_v4l2_dev_notify_init(dev);

	/* pci init */
	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_ctrl;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_ctrl;
	}

	/* print pci info */
	dev->pci_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
	       "latency: %d, mmio: 0x%llx\n", dev->name,
	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
	       dev->pci_lat,
	       (unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
	err = pci_set_dma_mask(pci_dev, 0xffffffff);
	if (err) {
		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		goto fail_context;
	}

	dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
	if (IS_ERR(dev->alloc_ctx)) {
		err = PTR_ERR(dev->alloc_ctx);
		goto fail_context;
	}

	err = request_irq(pci_dev->irq, cx23885_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		printk(KERN_ERR "%s: can't get IRQ %d\n",
		       dev->name, pci_dev->irq);
		goto fail_irq;
	}

	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
		break;
	}

	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so these have to take place after the cx23885_irq() handler
	 * is hooked up by the call to request_irq() above.
	 */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_irq:
	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail_context:
	cx23885_dev_unregister(dev);
fail_ctrl:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
static struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887/8 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
};
static int __init cx23885_init(void)
{
	printk(KERN_INFO "cx23885 driver version %s loaded\n",
	       CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}

static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}

module_init(cx23885_init);
module_exit(cx23885_fini);