1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Driver for the Conexant CX23885 PCIe bridge
5 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
10 #include <linux/init.h>
11 #include <linux/list.h>
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/kmod.h>
15 #include <linux/kernel.h>
16 #include <linux/pci.h>
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/delay.h>
20 #include <asm/div64.h>
21 #include <linux/firmware.h>
24 #include "altera-ci.h"
25 #include "cx23888-ir.h"
26 #include "cx23885-ir.h"
27 #include "cx23885-av.h"
28 #include "cx23885-input.h"
30 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
31 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
32 MODULE_LICENSE("GPL");
33 MODULE_VERSION(CX23885_VERSION
);
/*
 * Some platforms have been found to require periodic resetting of the DMA
 * engine. Ryzen and XEON platforms are known to be affected. The symptom
 * encountered is "mpeg risc op code error". Only Ryzen platforms employ
 * this workaround if the option equals 1. The workaround can be explicitly
 * disabled for all platforms by setting to 0, the workaround can be forced
 * on for any platform by setting to 2.
 */
43 static unsigned int dma_reset_workaround
= 1;
44 module_param(dma_reset_workaround
, int, 0644);
45 MODULE_PARM_DESC(dma_reset_workaround
, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
47 static unsigned int debug
;
48 module_param(debug
, int, 0644);
49 MODULE_PARM_DESC(debug
, "enable debug messages");
51 static unsigned int card
[] = {[0 ... (CX23885_MAXBOARDS
- 1)] = UNSET
};
52 module_param_array(card
, int, NULL
, 0444);
53 MODULE_PARM_DESC(card
, "card type");
/*
 * Debug printk gated on the "debug" module parameter level.
 * NOTE(review): macro tail reconstructed from the visible fragment — the
 * printk argument list was cut off in this copy; verify against upstream.
 */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
	} while (0)

/* Count of cx23885 devices probed so far; indexes the card[] override array. */
static unsigned int cx23885_devcount;
63 #define NO_SYNC_LINE (-1U)
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 *
 * 1 line = 16 bytes of CDT
 * cdt size = 16 * linesize
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */
85 static struct sram_channel cx23885_sram_channels
[] = {
88 .cmds_start
= 0x10000,
89 .ctrl_start
= 0x10380,
93 .ptr1_reg
= DMA1_PTR1
,
94 .ptr2_reg
= DMA1_PTR2
,
95 .cnt1_reg
= DMA1_CNT1
,
96 .cnt2_reg
= DMA1_CNT2
,
105 .ptr1_reg
= DMA2_PTR1
,
106 .ptr2_reg
= DMA2_PTR2
,
107 .cnt1_reg
= DMA2_CNT1
,
108 .cnt2_reg
= DMA2_CNT2
,
112 .cmds_start
= 0x100A0,
113 .ctrl_start
= 0x10400,
115 .fifo_start
= 0x5000,
117 .ptr1_reg
= DMA3_PTR1
,
118 .ptr2_reg
= DMA3_PTR2
,
119 .cnt1_reg
= DMA3_CNT1
,
120 .cnt2_reg
= DMA3_CNT2
,
129 .ptr1_reg
= DMA4_PTR1
,
130 .ptr2_reg
= DMA4_PTR2
,
131 .cnt1_reg
= DMA4_CNT1
,
132 .cnt2_reg
= DMA4_CNT2
,
141 .ptr1_reg
= DMA5_PTR1
,
142 .ptr2_reg
= DMA5_PTR2
,
143 .cnt1_reg
= DMA5_CNT1
,
144 .cnt2_reg
= DMA5_CNT2
,
148 .cmds_start
= 0x10140,
149 .ctrl_start
= 0x10440,
151 .fifo_start
= 0x6000,
153 .ptr1_reg
= DMA5_PTR1
,
154 .ptr2_reg
= DMA5_PTR2
,
155 .cnt1_reg
= DMA5_CNT1
,
156 .cnt2_reg
= DMA5_CNT2
,
160 .cmds_start
= 0x10190,
161 .ctrl_start
= 0x10480,
163 .fifo_start
= 0x7000,
165 .ptr1_reg
= DMA6_PTR1
,
166 .ptr2_reg
= DMA6_PTR2
,
167 .cnt1_reg
= DMA6_CNT1
,
168 .cnt2_reg
= DMA6_CNT2
,
177 .ptr1_reg
= DMA7_PTR1
,
178 .ptr2_reg
= DMA7_PTR2
,
179 .cnt1_reg
= DMA7_CNT1
,
180 .cnt2_reg
= DMA7_CNT2
,
189 .ptr1_reg
= DMA8_PTR1
,
190 .ptr2_reg
= DMA8_PTR2
,
191 .cnt1_reg
= DMA8_CNT1
,
192 .cnt2_reg
= DMA8_CNT2
,
196 static struct sram_channel cx23887_sram_channels
[] = {
199 .cmds_start
= 0x10000,
200 .ctrl_start
= 0x105b0,
204 .ptr1_reg
= DMA1_PTR1
,
205 .ptr2_reg
= DMA1_PTR2
,
206 .cnt1_reg
= DMA1_CNT1
,
207 .cnt2_reg
= DMA1_CNT2
,
210 .name
= "VID A (VBI)",
211 .cmds_start
= 0x10050,
212 .ctrl_start
= 0x105F0,
214 .fifo_start
= 0x3000,
216 .ptr1_reg
= DMA2_PTR1
,
217 .ptr2_reg
= DMA2_PTR2
,
218 .cnt1_reg
= DMA2_CNT1
,
219 .cnt2_reg
= DMA2_CNT2
,
223 .cmds_start
= 0x100A0,
224 .ctrl_start
= 0x10630,
226 .fifo_start
= 0x5000,
228 .ptr1_reg
= DMA3_PTR1
,
229 .ptr2_reg
= DMA3_PTR2
,
230 .cnt1_reg
= DMA3_CNT1
,
231 .cnt2_reg
= DMA3_CNT2
,
240 .ptr1_reg
= DMA4_PTR1
,
241 .ptr2_reg
= DMA4_PTR2
,
242 .cnt1_reg
= DMA4_CNT1
,
243 .cnt2_reg
= DMA4_CNT2
,
252 .ptr1_reg
= DMA5_PTR1
,
253 .ptr2_reg
= DMA5_PTR2
,
254 .cnt1_reg
= DMA5_CNT1
,
255 .cnt2_reg
= DMA5_CNT2
,
259 .cmds_start
= 0x10140,
260 .ctrl_start
= 0x10670,
262 .fifo_start
= 0x6000,
264 .ptr1_reg
= DMA5_PTR1
,
265 .ptr2_reg
= DMA5_PTR2
,
266 .cnt1_reg
= DMA5_CNT1
,
267 .cnt2_reg
= DMA5_CNT2
,
271 .cmds_start
= 0x10190,
272 .ctrl_start
= 0x106B0,
274 .fifo_start
= 0x7000,
276 .ptr1_reg
= DMA6_PTR1
,
277 .ptr2_reg
= DMA6_PTR2
,
278 .cnt1_reg
= DMA6_CNT1
,
279 .cnt2_reg
= DMA6_CNT2
,
288 .ptr1_reg
= DMA7_PTR1
,
289 .ptr2_reg
= DMA7_PTR2
,
290 .cnt1_reg
= DMA7_CNT1
,
291 .cnt2_reg
= DMA7_CNT2
,
300 .ptr1_reg
= DMA8_PTR1
,
301 .ptr2_reg
= DMA8_PTR2
,
302 .cnt1_reg
= DMA8_CNT1
,
303 .cnt2_reg
= DMA8_CNT2
,
307 static void cx23885_irq_add(struct cx23885_dev
*dev
, u32 mask
)
310 spin_lock_irqsave(&dev
->pci_irqmask_lock
, flags
);
312 dev
->pci_irqmask
|= mask
;
314 spin_unlock_irqrestore(&dev
->pci_irqmask_lock
, flags
);
317 void cx23885_irq_add_enable(struct cx23885_dev
*dev
, u32 mask
)
320 spin_lock_irqsave(&dev
->pci_irqmask_lock
, flags
);
322 dev
->pci_irqmask
|= mask
;
323 cx_set(PCI_INT_MSK
, mask
);
325 spin_unlock_irqrestore(&dev
->pci_irqmask_lock
, flags
);
328 void cx23885_irq_enable(struct cx23885_dev
*dev
, u32 mask
)
332 spin_lock_irqsave(&dev
->pci_irqmask_lock
, flags
);
334 v
= mask
& dev
->pci_irqmask
;
336 cx_set(PCI_INT_MSK
, v
);
338 spin_unlock_irqrestore(&dev
->pci_irqmask_lock
, flags
);
/* Enable every interrupt source registered in the shadow mask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
346 void cx23885_irq_disable(struct cx23885_dev
*dev
, u32 mask
)
349 spin_lock_irqsave(&dev
->pci_irqmask_lock
, flags
);
351 cx_clear(PCI_INT_MSK
, mask
);
353 spin_unlock_irqrestore(&dev
->pci_irqmask_lock
, flags
);
/* Mask every interrupt source in hardware (shadow mask preserved). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
361 void cx23885_irq_remove(struct cx23885_dev
*dev
, u32 mask
)
364 spin_lock_irqsave(&dev
->pci_irqmask_lock
, flags
);
366 dev
->pci_irqmask
&= ~mask
;
367 cx_clear(PCI_INT_MSK
, mask
);
369 spin_unlock_irqrestore(&dev
->pci_irqmask_lock
, flags
);
372 static u32
cx23885_irq_get_mask(struct cx23885_dev
*dev
)
376 spin_lock_irqsave(&dev
->pci_irqmask_lock
, flags
);
378 v
= cx_read(PCI_INT_MSK
);
380 spin_unlock_irqrestore(&dev
->pci_irqmask_lock
, flags
);
384 static int cx23885_risc_decode(u32 risc
)
386 static char *instr
[16] = {
387 [RISC_SYNC
>> 28] = "sync",
388 [RISC_WRITE
>> 28] = "write",
389 [RISC_WRITEC
>> 28] = "writec",
390 [RISC_READ
>> 28] = "read",
391 [RISC_READC
>> 28] = "readc",
392 [RISC_JUMP
>> 28] = "jump",
393 [RISC_SKIP
>> 28] = "skip",
394 [RISC_WRITERM
>> 28] = "writerm",
395 [RISC_WRITECM
>> 28] = "writecm",
396 [RISC_WRITECR
>> 28] = "writecr",
398 static int incr
[16] = {
399 [RISC_WRITE
>> 28] = 3,
400 [RISC_JUMP
>> 28] = 3,
401 [RISC_SKIP
>> 28] = 1,
402 [RISC_SYNC
>> 28] = 1,
403 [RISC_WRITERM
>> 28] = 3,
404 [RISC_WRITECM
>> 28] = 3,
405 [RISC_WRITECR
>> 28] = 4,
407 static char *bits
[] = {
408 "12", "13", "14", "resync",
409 "cnt0", "cnt1", "18", "19",
410 "20", "21", "22", "23",
411 "irq1", "irq2", "eol", "sol",
415 printk(KERN_DEBUG
"0x%08x [ %s", risc
,
416 instr
[risc
>> 28] ? instr
[risc
>> 28] : "INVALID");
417 for (i
= ARRAY_SIZE(bits
) - 1; i
>= 0; i
--)
418 if (risc
& (1 << (i
+ 12)))
419 pr_cont(" %s", bits
[i
]);
420 pr_cont(" count=%d ]\n", risc
& 0xfff);
421 return incr
[risc
>> 28] ? incr
[risc
>> 28] : 1;
424 static void cx23885_wakeup(struct cx23885_tsport
*port
,
425 struct cx23885_dmaqueue
*q
, u32 count
)
427 struct cx23885_buffer
*buf
;
429 int max_buf_done
= 5; /* service maximum five buffers */
432 if (list_empty(&q
->active
))
434 buf
= list_entry(q
->active
.next
,
435 struct cx23885_buffer
, queue
);
437 buf
->vb
.vb2_buf
.timestamp
= ktime_get_ns();
438 buf
->vb
.sequence
= q
->count
++;
439 if (count
!= (q
->count
% 65536)) {
440 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf
,
441 buf
->vb
.vb2_buf
.index
, count
, q
->count
);
443 dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf
,
444 buf
->vb
.vb2_buf
.index
, count
, q
->count
);
446 list_del(&buf
->queue
);
447 vb2_buffer_done(&buf
->vb
.vb2_buf
, VB2_BUF_STATE_DONE
);
449 /* count register is 16 bits so apply modulo appropriately */
450 count_delta
= ((int)count
- (int)(q
->count
% 65536));
451 } while ((count_delta
> 0) && (max_buf_done
> 0));
454 int cx23885_sram_channel_setup(struct cx23885_dev
*dev
,
455 struct sram_channel
*ch
,
456 unsigned int bpl
, u32 risc
)
458 unsigned int i
, lines
;
461 if (ch
->cmds_start
== 0) {
462 dprintk(1, "%s() Erasing channel [%s]\n", __func__
,
464 cx_write(ch
->ptr1_reg
, 0);
465 cx_write(ch
->ptr2_reg
, 0);
466 cx_write(ch
->cnt2_reg
, 0);
467 cx_write(ch
->cnt1_reg
, 0);
470 dprintk(1, "%s() Configuring channel [%s]\n", __func__
,
474 bpl
= (bpl
+ 7) & ~7; /* alignment */
476 lines
= ch
->fifo_size
/ bpl
;
481 cx_write(8 + 0, RISC_JUMP
| RISC_CNT_RESET
);
486 for (i
= 0; i
< lines
; i
++) {
487 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__
, cdt
+ 16*i
,
488 ch
->fifo_start
+ bpl
*i
);
489 cx_write(cdt
+ 16*i
, ch
->fifo_start
+ bpl
*i
);
490 cx_write(cdt
+ 16*i
+ 4, 0);
491 cx_write(cdt
+ 16*i
+ 8, 0);
492 cx_write(cdt
+ 16*i
+ 12, 0);
497 cx_write(ch
->cmds_start
+ 0, 8);
499 cx_write(ch
->cmds_start
+ 0, risc
);
500 cx_write(ch
->cmds_start
+ 4, 0); /* 64 bits 63-32 */
501 cx_write(ch
->cmds_start
+ 8, cdt
);
502 cx_write(ch
->cmds_start
+ 12, (lines
*16) >> 3);
503 cx_write(ch
->cmds_start
+ 16, ch
->ctrl_start
);
505 cx_write(ch
->cmds_start
+ 20, 0x80000000 | (64 >> 2));
507 cx_write(ch
->cmds_start
+ 20, 64 >> 2);
508 for (i
= 24; i
< 80; i
+= 4)
509 cx_write(ch
->cmds_start
+ i
, 0);
512 cx_write(ch
->ptr1_reg
, ch
->fifo_start
);
513 cx_write(ch
->ptr2_reg
, cdt
);
514 cx_write(ch
->cnt2_reg
, (lines
*16) >> 3);
515 cx_write(ch
->cnt1_reg
, (bpl
>> 3) - 1);
517 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
526 void cx23885_sram_channel_dump(struct cx23885_dev
*dev
,
527 struct sram_channel
*ch
)
529 static char *name
[] = {
546 unsigned int i
, j
, n
;
548 pr_warn("%s: %s - dma channel status dump\n",
549 dev
->name
, ch
->name
);
550 for (i
= 0; i
< ARRAY_SIZE(name
); i
++)
551 pr_warn("%s: cmds: %-15s: 0x%08x\n",
553 cx_read(ch
->cmds_start
+ 4*i
));
555 for (i
= 0; i
< 4; i
++) {
556 risc
= cx_read(ch
->cmds_start
+ 4 * (i
+ 14));
557 pr_warn("%s: risc%d:", dev
->name
, i
);
558 cx23885_risc_decode(risc
);
560 for (i
= 0; i
< (64 >> 2); i
+= n
) {
561 risc
= cx_read(ch
->ctrl_start
+ 4 * i
);
562 /* No consideration for bits 63-32 */
564 pr_warn("%s: (0x%08x) iq %x:", dev
->name
,
565 ch
->ctrl_start
+ 4 * i
, i
);
566 n
= cx23885_risc_decode(risc
);
567 for (j
= 1; j
< n
; j
++) {
568 risc
= cx_read(ch
->ctrl_start
+ 4 * (i
+ j
));
569 pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
570 dev
->name
, i
+j
, risc
, j
);
574 pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
575 dev
->name
, ch
->fifo_start
, ch
->fifo_start
+ch
->fifo_size
);
576 pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
577 dev
->name
, ch
->ctrl_start
, ch
->ctrl_start
+ 6*16);
578 pr_warn("%s: ptr1_reg: 0x%08x\n",
579 dev
->name
, cx_read(ch
->ptr1_reg
));
580 pr_warn("%s: ptr2_reg: 0x%08x\n",
581 dev
->name
, cx_read(ch
->ptr2_reg
));
582 pr_warn("%s: cnt1_reg: 0x%08x\n",
583 dev
->name
, cx_read(ch
->cnt1_reg
));
584 pr_warn("%s: cnt2_reg: 0x%08x\n",
585 dev
->name
, cx_read(ch
->cnt2_reg
));
588 static void cx23885_risc_disasm(struct cx23885_tsport
*port
,
589 struct cx23885_riscmem
*risc
)
591 struct cx23885_dev
*dev
= port
->dev
;
592 unsigned int i
, j
, n
;
594 pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
595 dev
->name
, risc
->cpu
, (unsigned long)risc
->dma
);
596 for (i
= 0; i
< (risc
->size
>> 2); i
+= n
) {
597 pr_info("%s: %04d:", dev
->name
, i
);
598 n
= cx23885_risc_decode(le32_to_cpu(risc
->cpu
[i
]));
599 for (j
= 1; j
< n
; j
++)
600 pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
601 dev
->name
, i
+ j
, risc
->cpu
[i
+ j
], j
);
602 if (risc
->cpu
[i
] == cpu_to_le32(RISC_JUMP
))
607 static void cx23885_clear_bridge_error(struct cx23885_dev
*dev
)
609 uint32_t reg1_val
, reg2_val
;
611 if (!dev
->need_dma_reset
)
614 reg1_val
= cx_read(TC_REQ
); /* read-only */
615 reg2_val
= cx_read(TC_REQ_SET
);
617 if (reg1_val
&& reg2_val
) {
618 cx_write(TC_REQ
, reg1_val
);
619 cx_write(TC_REQ_SET
, reg2_val
);
625 dev_info(&dev
->pci
->dev
,
626 "dma in progress detected 0x%08x 0x%08x, clearing\n",
631 static void cx23885_shutdown(struct cx23885_dev
*dev
)
633 /* disable RISC controller */
634 cx_write(DEV_CNTRL2
, 0);
636 /* Disable all IR activity */
637 cx_write(IR_CNTRL_REG
, 0);
639 /* Disable Video A/B activity */
640 cx_write(VID_A_DMA_CTL
, 0);
641 cx_write(VID_B_DMA_CTL
, 0);
642 cx_write(VID_C_DMA_CTL
, 0);
644 /* Disable Audio activity */
645 cx_write(AUD_INT_DMA_CTL
, 0);
646 cx_write(AUD_EXT_DMA_CTL
, 0);
648 /* Disable Serial port */
649 cx_write(UART_CTL
, 0);
651 /* Disable Interrupts */
652 cx23885_irq_disable_all(dev
);
653 cx_write(VID_A_INT_MSK
, 0);
654 cx_write(VID_B_INT_MSK
, 0);
655 cx_write(VID_C_INT_MSK
, 0);
656 cx_write(AUDIO_INT_INT_MSK
, 0);
657 cx_write(AUDIO_EXT_INT_MSK
, 0);
661 static void cx23885_reset(struct cx23885_dev
*dev
)
663 dprintk(1, "%s()\n", __func__
);
665 cx23885_shutdown(dev
);
667 cx_write(PCI_INT_STAT
, 0xffffffff);
668 cx_write(VID_A_INT_STAT
, 0xffffffff);
669 cx_write(VID_B_INT_STAT
, 0xffffffff);
670 cx_write(VID_C_INT_STAT
, 0xffffffff);
671 cx_write(AUDIO_INT_INT_STAT
, 0xffffffff);
672 cx_write(AUDIO_EXT_INT_STAT
, 0xffffffff);
673 cx_write(CLK_DELAY
, cx_read(CLK_DELAY
) & 0x80000000);
674 cx_write(PAD_CTRL
, 0x00500300);
676 /* clear dma in progress */
677 cx23885_clear_bridge_error(dev
);
680 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH01
],
682 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH02
], 128, 0);
683 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH03
],
685 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH04
], 128, 0);
686 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH05
], 128, 0);
687 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH06
],
689 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH07
], 128, 0);
690 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH08
], 128, 0);
691 cx23885_sram_channel_setup(dev
, &dev
->sram_channels
[SRAM_CH09
], 128, 0);
693 cx23885_gpio_setup(dev
);
695 cx23885_irq_get_mask(dev
);
697 /* clear dma in progress */
698 cx23885_clear_bridge_error(dev
);
702 static int cx23885_pci_quirks(struct cx23885_dev
*dev
)
704 dprintk(1, "%s()\n", __func__
);
706 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
707 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
708 * occur on the cx23887 bridge.
710 if (dev
->bridge
== CX23885_BRIDGE_885
)
711 cx_clear(RDR_TLCTL0
, 1 << 4);
713 /* clear dma in progress */
714 cx23885_clear_bridge_error(dev
);
718 static int get_resources(struct cx23885_dev
*dev
)
720 if (request_mem_region(pci_resource_start(dev
->pci
, 0),
721 pci_resource_len(dev
->pci
, 0),
725 pr_err("%s: can't get MMIO memory @ 0x%llx\n",
726 dev
->name
, (unsigned long long)pci_resource_start(dev
->pci
, 0));
731 static int cx23885_init_tsport(struct cx23885_dev
*dev
,
732 struct cx23885_tsport
*port
, int portno
)
734 dprintk(1, "%s(portno=%d)\n", __func__
, portno
);
736 /* Transport bus init dma queue - Common settings */
737 port
->dma_ctl_val
= 0x11; /* Enable RISC controller and Fifo */
738 port
->ts_int_msk_val
= 0x1111; /* TS port bits for RISC */
739 port
->vld_misc_val
= 0x0;
740 port
->hw_sop_ctrl_val
= (0x47 << 16 | 188 << 4);
742 spin_lock_init(&port
->slock
);
746 INIT_LIST_HEAD(&port
->mpegq
.active
);
747 mutex_init(&port
->frontends
.lock
);
748 INIT_LIST_HEAD(&port
->frontends
.felist
);
749 port
->frontends
.active_fe_id
= 0;
751 /* This should be hardcoded allow a single frontend
752 * attachment to this tsport, keeping the -dvb.c
753 * code clean and safe.
755 if (!port
->num_frontends
)
756 port
->num_frontends
= 1;
760 port
->reg_gpcnt
= VID_B_GPCNT
;
761 port
->reg_gpcnt_ctl
= VID_B_GPCNT_CTL
;
762 port
->reg_dma_ctl
= VID_B_DMA_CTL
;
763 port
->reg_lngth
= VID_B_LNGTH
;
764 port
->reg_hw_sop_ctrl
= VID_B_HW_SOP_CTL
;
765 port
->reg_gen_ctrl
= VID_B_GEN_CTL
;
766 port
->reg_bd_pkt_status
= VID_B_BD_PKT_STATUS
;
767 port
->reg_sop_status
= VID_B_SOP_STATUS
;
768 port
->reg_fifo_ovfl_stat
= VID_B_FIFO_OVFL_STAT
;
769 port
->reg_vld_misc
= VID_B_VLD_MISC
;
770 port
->reg_ts_clk_en
= VID_B_TS_CLK_EN
;
771 port
->reg_src_sel
= VID_B_SRC_SEL
;
772 port
->reg_ts_int_msk
= VID_B_INT_MSK
;
773 port
->reg_ts_int_stat
= VID_B_INT_STAT
;
774 port
->sram_chno
= SRAM_CH03
; /* VID_B */
775 port
->pci_irqmask
= 0x02; /* VID_B bit1 */
778 port
->reg_gpcnt
= VID_C_GPCNT
;
779 port
->reg_gpcnt_ctl
= VID_C_GPCNT_CTL
;
780 port
->reg_dma_ctl
= VID_C_DMA_CTL
;
781 port
->reg_lngth
= VID_C_LNGTH
;
782 port
->reg_hw_sop_ctrl
= VID_C_HW_SOP_CTL
;
783 port
->reg_gen_ctrl
= VID_C_GEN_CTL
;
784 port
->reg_bd_pkt_status
= VID_C_BD_PKT_STATUS
;
785 port
->reg_sop_status
= VID_C_SOP_STATUS
;
786 port
->reg_fifo_ovfl_stat
= VID_C_FIFO_OVFL_STAT
;
787 port
->reg_vld_misc
= VID_C_VLD_MISC
;
788 port
->reg_ts_clk_en
= VID_C_TS_CLK_EN
;
789 port
->reg_src_sel
= 0;
790 port
->reg_ts_int_msk
= VID_C_INT_MSK
;
791 port
->reg_ts_int_stat
= VID_C_INT_STAT
;
792 port
->sram_chno
= SRAM_CH06
; /* VID_C */
793 port
->pci_irqmask
= 0x04; /* VID_C bit2 */
802 static void cx23885_dev_checkrevision(struct cx23885_dev
*dev
)
804 switch (cx_read(RDR_CFG2
) & 0xff) {
807 dev
->hwrevision
= 0xa0;
811 dev
->hwrevision
= 0xa1;
814 /* CX23885-13Z/14Z */
815 dev
->hwrevision
= 0xb0;
818 if (dev
->pci
->device
== 0x8880) {
819 /* CX23888-21Z/22Z */
820 dev
->hwrevision
= 0xc0;
823 dev
->hwrevision
= 0xa4;
827 if (dev
->pci
->device
== 0x8880) {
829 dev
->hwrevision
= 0xd0;
831 /* CX23885-15Z, CX23888-31Z */
832 dev
->hwrevision
= 0xa5;
837 dev
->hwrevision
= 0xc0;
841 dev
->hwrevision
= 0xb1;
844 pr_err("%s() New hardware revision found 0x%x\n",
845 __func__
, dev
->hwrevision
);
848 pr_info("%s() Hardware revision = 0x%02x\n",
849 __func__
, dev
->hwrevision
);
851 pr_err("%s() Hardware revision unknown 0x%x\n",
852 __func__
, dev
->hwrevision
);
855 /* Find the first v4l2_subdev member of the group id in hw */
856 struct v4l2_subdev
*cx23885_find_hw(struct cx23885_dev
*dev
, u32 hw
)
858 struct v4l2_subdev
*result
= NULL
;
859 struct v4l2_subdev
*sd
;
861 spin_lock(&dev
->v4l2_dev
.lock
);
862 v4l2_device_for_each_subdev(sd
, &dev
->v4l2_dev
) {
863 if (sd
->grp_id
== hw
) {
868 spin_unlock(&dev
->v4l2_dev
.lock
);
872 static int cx23885_dev_setup(struct cx23885_dev
*dev
)
876 spin_lock_init(&dev
->pci_irqmask_lock
);
877 spin_lock_init(&dev
->slock
);
879 mutex_init(&dev
->lock
);
880 mutex_init(&dev
->gpio_lock
);
882 atomic_inc(&dev
->refcount
);
884 dev
->nr
= cx23885_devcount
++;
885 sprintf(dev
->name
, "cx23885[%d]", dev
->nr
);
887 /* Configure the internal memory */
888 if (dev
->pci
->device
== 0x8880) {
889 /* Could be 887 or 888, assume an 888 default */
890 dev
->bridge
= CX23885_BRIDGE_888
;
891 /* Apply a sensible clock frequency for the PCIe bridge */
892 dev
->clk_freq
= 50000000;
893 dev
->sram_channels
= cx23887_sram_channels
;
895 if (dev
->pci
->device
== 0x8852) {
896 dev
->bridge
= CX23885_BRIDGE_885
;
897 /* Apply a sensible clock frequency for the PCIe bridge */
898 dev
->clk_freq
= 28000000;
899 dev
->sram_channels
= cx23885_sram_channels
;
903 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
904 __func__
, dev
->bridge
);
908 if (card
[dev
->nr
] < cx23885_bcount
)
909 dev
->board
= card
[dev
->nr
];
910 for (i
= 0; UNSET
== dev
->board
&& i
< cx23885_idcount
; i
++)
911 if (dev
->pci
->subsystem_vendor
== cx23885_subids
[i
].subvendor
&&
912 dev
->pci
->subsystem_device
== cx23885_subids
[i
].subdevice
)
913 dev
->board
= cx23885_subids
[i
].card
;
914 if (UNSET
== dev
->board
) {
915 dev
->board
= CX23885_BOARD_UNKNOWN
;
916 cx23885_card_list(dev
);
919 if (dev
->pci
->device
== 0x8852) {
920 /* no DIF on cx23885, so no analog tuner support possible */
921 if (dev
->board
== CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC
)
922 dev
->board
= CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885
;
923 else if (dev
->board
== CX23885_BOARD_HAUPPAUGE_QUADHD_DVB
)
924 dev
->board
= CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885
;
927 /* If the user specific a clk freq override, apply it */
928 if (cx23885_boards
[dev
->board
].clk_freq
> 0)
929 dev
->clk_freq
= cx23885_boards
[dev
->board
].clk_freq
;
931 if (dev
->board
== CX23885_BOARD_HAUPPAUGE_IMPACTVCBE
&&
932 dev
->pci
->subsystem_device
== 0x7137) {
933 /* Hauppauge ImpactVCBe device ID 0x7137 is populated
934 * with an 888, and a 25Mhz crystal, instead of the
935 * usual third overtone 50Mhz. The default clock rate must
936 * be overridden so the cx25840 is properly configured
938 dev
->clk_freq
= 25000000;
941 dev
->pci_bus
= dev
->pci
->bus
->number
;
942 dev
->pci_slot
= PCI_SLOT(dev
->pci
->devfn
);
943 cx23885_irq_add(dev
, 0x001f00);
945 /* External Master 1 Bus */
946 dev
->i2c_bus
[0].nr
= 0;
947 dev
->i2c_bus
[0].dev
= dev
;
948 dev
->i2c_bus
[0].reg_stat
= I2C1_STAT
;
949 dev
->i2c_bus
[0].reg_ctrl
= I2C1_CTRL
;
950 dev
->i2c_bus
[0].reg_addr
= I2C1_ADDR
;
951 dev
->i2c_bus
[0].reg_rdata
= I2C1_RDATA
;
952 dev
->i2c_bus
[0].reg_wdata
= I2C1_WDATA
;
953 dev
->i2c_bus
[0].i2c_period
= (0x9d << 24); /* 100kHz */
955 /* External Master 2 Bus */
956 dev
->i2c_bus
[1].nr
= 1;
957 dev
->i2c_bus
[1].dev
= dev
;
958 dev
->i2c_bus
[1].reg_stat
= I2C2_STAT
;
959 dev
->i2c_bus
[1].reg_ctrl
= I2C2_CTRL
;
960 dev
->i2c_bus
[1].reg_addr
= I2C2_ADDR
;
961 dev
->i2c_bus
[1].reg_rdata
= I2C2_RDATA
;
962 dev
->i2c_bus
[1].reg_wdata
= I2C2_WDATA
;
963 dev
->i2c_bus
[1].i2c_period
= (0x9d << 24); /* 100kHz */
965 /* Internal Master 3 Bus */
966 dev
->i2c_bus
[2].nr
= 2;
967 dev
->i2c_bus
[2].dev
= dev
;
968 dev
->i2c_bus
[2].reg_stat
= I2C3_STAT
;
969 dev
->i2c_bus
[2].reg_ctrl
= I2C3_CTRL
;
970 dev
->i2c_bus
[2].reg_addr
= I2C3_ADDR
;
971 dev
->i2c_bus
[2].reg_rdata
= I2C3_RDATA
;
972 dev
->i2c_bus
[2].reg_wdata
= I2C3_WDATA
;
973 dev
->i2c_bus
[2].i2c_period
= (0x07 << 24); /* 1.95MHz */
975 if ((cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
) ||
976 (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
))
977 cx23885_init_tsport(dev
, &dev
->ts1
, 1);
979 if ((cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
) ||
980 (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
))
981 cx23885_init_tsport(dev
, &dev
->ts2
, 2);
983 if (get_resources(dev
) < 0) {
984 pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
985 dev
->name
, dev
->pci
->subsystem_vendor
,
986 dev
->pci
->subsystem_device
);
993 dev
->lmmio
= ioremap(pci_resource_start(dev
->pci
, 0),
994 pci_resource_len(dev
->pci
, 0));
996 dev
->bmmio
= (u8 __iomem
*)dev
->lmmio
;
998 pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
999 dev
->name
, dev
->pci
->subsystem_vendor
,
1000 dev
->pci
->subsystem_device
, cx23885_boards
[dev
->board
].name
,
1001 dev
->board
, card
[dev
->nr
] == dev
->board
?
1002 "insmod option" : "autodetected");
1004 cx23885_pci_quirks(dev
);
1006 /* Assume some sensible defaults */
1007 dev
->tuner_type
= cx23885_boards
[dev
->board
].tuner_type
;
1008 dev
->tuner_addr
= cx23885_boards
[dev
->board
].tuner_addr
;
1009 dev
->tuner_bus
= cx23885_boards
[dev
->board
].tuner_bus
;
1010 dev
->radio_type
= cx23885_boards
[dev
->board
].radio_type
;
1011 dev
->radio_addr
= cx23885_boards
[dev
->board
].radio_addr
;
1013 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
1014 __func__
, dev
->tuner_type
, dev
->tuner_addr
, dev
->tuner_bus
);
1015 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
1016 __func__
, dev
->radio_type
, dev
->radio_addr
);
1018 /* The cx23417 encoder has GPIO's that need to be initialised
1019 * before DVB, so that demodulators and tuners are out of
1020 * reset before DVB uses them.
1022 if ((cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
) ||
1023 (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
))
1024 cx23885_mc417_init(dev
);
1029 cx23885_i2c_register(&dev
->i2c_bus
[0]);
1030 cx23885_i2c_register(&dev
->i2c_bus
[1]);
1031 cx23885_i2c_register(&dev
->i2c_bus
[2]);
1032 cx23885_card_setup(dev
);
1033 call_all(dev
, tuner
, standby
);
1034 cx23885_ir_init(dev
);
1036 if (dev
->board
== CX23885_BOARD_VIEWCAST_460E
) {
1038 * GPIOs 9/8 are input detection bits for the breakout video
1039 * (gpio 8) and audio (gpio 9) cables. When they're attached,
1040 * this gpios are pulled high. Make sure these GPIOs are marked
1043 cx23885_gpio_enable(dev
, 0x300, 0);
1046 if (cx23885_boards
[dev
->board
].porta
== CX23885_ANALOG_VIDEO
) {
1047 if (cx23885_video_register(dev
) < 0) {
1048 pr_err("%s() Failed to register analog video adapters on VID_A\n",
1053 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
) {
1054 if (cx23885_boards
[dev
->board
].num_fds_portb
)
1055 dev
->ts1
.num_frontends
=
1056 cx23885_boards
[dev
->board
].num_fds_portb
;
1057 if (cx23885_dvb_register(&dev
->ts1
) < 0) {
1058 pr_err("%s() Failed to register dvb adapters on VID_B\n",
1062 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
) {
1063 if (cx23885_417_register(dev
) < 0) {
1064 pr_err("%s() Failed to register 417 on VID_B\n",
1069 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
) {
1070 if (cx23885_boards
[dev
->board
].num_fds_portc
)
1071 dev
->ts2
.num_frontends
=
1072 cx23885_boards
[dev
->board
].num_fds_portc
;
1073 if (cx23885_dvb_register(&dev
->ts2
) < 0) {
1074 pr_err("%s() Failed to register dvb on VID_C\n",
1078 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
) {
1079 if (cx23885_417_register(dev
) < 0) {
1080 pr_err("%s() Failed to register 417 on VID_C\n",
1085 cx23885_dev_checkrevision(dev
);
1087 /* disable MSI for NetUP cards, otherwise CI is not working */
1088 if (cx23885_boards
[dev
->board
].ci_type
> 0)
1089 cx_clear(RDR_RDRCTL1
, 1 << 8);
1091 switch (dev
->board
) {
1092 case CX23885_BOARD_TEVII_S470
:
1093 case CX23885_BOARD_TEVII_S471
:
1094 cx_clear(RDR_RDRCTL1
, 1 << 8);
1101 static void cx23885_dev_unregister(struct cx23885_dev
*dev
)
1103 release_mem_region(pci_resource_start(dev
->pci
, 0),
1104 pci_resource_len(dev
->pci
, 0));
1106 if (!atomic_dec_and_test(&dev
->refcount
))
1109 if (cx23885_boards
[dev
->board
].porta
== CX23885_ANALOG_VIDEO
)
1110 cx23885_video_unregister(dev
);
1112 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
)
1113 cx23885_dvb_unregister(&dev
->ts1
);
1115 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1116 cx23885_417_unregister(dev
);
1118 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
)
1119 cx23885_dvb_unregister(&dev
->ts2
);
1121 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
)
1122 cx23885_417_unregister(dev
);
1124 cx23885_i2c_unregister(&dev
->i2c_bus
[2]);
1125 cx23885_i2c_unregister(&dev
->i2c_bus
[1]);
1126 cx23885_i2c_unregister(&dev
->i2c_bus
[0]);
1128 iounmap(dev
->lmmio
);
1131 static __le32
*cx23885_risc_field(__le32
*rp
, struct scatterlist
*sglist
,
1132 unsigned int offset
, u32 sync_line
,
1133 unsigned int bpl
, unsigned int padding
,
1134 unsigned int lines
, unsigned int lpi
, bool jump
)
1136 struct scatterlist
*sg
;
1137 unsigned int line
, todo
, sol
;
1141 *(rp
++) = cpu_to_le32(RISC_JUMP
);
1142 *(rp
++) = cpu_to_le32(0);
1143 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
1146 /* sync instruction */
1147 if (sync_line
!= NO_SYNC_LINE
)
1148 *(rp
++) = cpu_to_le32(RISC_RESYNC
| sync_line
);
1152 for (line
= 0; line
< lines
; line
++) {
1153 while (offset
&& offset
>= sg_dma_len(sg
)) {
1154 offset
-= sg_dma_len(sg
);
1158 if (lpi
&& line
> 0 && !(line
% lpi
))
1159 sol
= RISC_SOL
| RISC_IRQ1
| RISC_CNT_INC
;
1163 if (bpl
<= sg_dma_len(sg
)-offset
) {
1164 /* fits into current chunk */
1165 *(rp
++) = cpu_to_le32(RISC_WRITE
|sol
|RISC_EOL
|bpl
);
1166 *(rp
++) = cpu_to_le32(sg_dma_address(sg
)+offset
);
1167 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
1170 /* scanline needs to be split */
1172 *(rp
++) = cpu_to_le32(RISC_WRITE
|sol
|
1173 (sg_dma_len(sg
)-offset
));
1174 *(rp
++) = cpu_to_le32(sg_dma_address(sg
)+offset
);
1175 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
1176 todo
-= (sg_dma_len(sg
)-offset
);
1179 while (todo
> sg_dma_len(sg
)) {
1180 *(rp
++) = cpu_to_le32(RISC_WRITE
|
1182 *(rp
++) = cpu_to_le32(sg_dma_address(sg
));
1183 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
1184 todo
-= sg_dma_len(sg
);
1187 *(rp
++) = cpu_to_le32(RISC_WRITE
|RISC_EOL
|todo
);
1188 *(rp
++) = cpu_to_le32(sg_dma_address(sg
));
1189 *(rp
++) = cpu_to_le32(0); /* bits 63-32 */
1198 int cx23885_risc_buffer(struct pci_dev
*pci
, struct cx23885_riscmem
*risc
,
1199 struct scatterlist
*sglist
, unsigned int top_offset
,
1200 unsigned int bottom_offset
, unsigned int bpl
,
1201 unsigned int padding
, unsigned int lines
)
1203 u32 instructions
, fields
;
1207 if (UNSET
!= top_offset
)
1209 if (UNSET
!= bottom_offset
)
1212 /* estimate risc mem: worst case is one write per page border +
1213 one write per scan line + syncs + jump (all 2 dwords). Padding
1214 can cause next bpl to start close to a page border. First DMA
1215 region may be smaller than PAGE_SIZE */
1216 /* write and jump need and extra dword */
1217 instructions
= fields
* (1 + ((bpl
+ padding
) * lines
)
1218 / PAGE_SIZE
+ lines
);
1220 risc
->size
= instructions
* 12;
1221 risc
->cpu
= dma_alloc_coherent(&pci
->dev
, risc
->size
, &risc
->dma
,
1223 if (risc
->cpu
== NULL
)
1226 /* write risc instructions */
1228 if (UNSET
!= top_offset
)
1229 rp
= cx23885_risc_field(rp
, sglist
, top_offset
, 0,
1230 bpl
, padding
, lines
, 0, true);
1231 if (UNSET
!= bottom_offset
)
1232 rp
= cx23885_risc_field(rp
, sglist
, bottom_offset
, 0x200,
1233 bpl
, padding
, lines
, 0, UNSET
== top_offset
);
1235 /* save pointer to jmp instruction address */
1237 BUG_ON((risc
->jmp
- risc
->cpu
+ 2) * sizeof(*risc
->cpu
) > risc
->size
);
1241 int cx23885_risc_databuffer(struct pci_dev
*pci
,
1242 struct cx23885_riscmem
*risc
,
1243 struct scatterlist
*sglist
,
1245 unsigned int lines
, unsigned int lpi
)
1250 /* estimate risc mem: worst case is one write per page border +
1251 one write per scan line + syncs + jump (all 2 dwords). Here
1252 there is no padding and no sync. First DMA region may be smaller
1254 /* Jump and write need an extra dword */
1255 instructions
= 1 + (bpl
* lines
) / PAGE_SIZE
+ lines
;
1258 risc
->size
= instructions
* 12;
1259 risc
->cpu
= dma_alloc_coherent(&pci
->dev
, risc
->size
, &risc
->dma
,
1261 if (risc
->cpu
== NULL
)
1264 /* write risc instructions */
1266 rp
= cx23885_risc_field(rp
, sglist
, 0, NO_SYNC_LINE
,
1267 bpl
, 0, lines
, lpi
, lpi
== 0);
1269 /* save pointer to jmp instruction address */
1271 BUG_ON((risc
->jmp
- risc
->cpu
+ 2) * sizeof(*risc
->cpu
) > risc
->size
);
1275 int cx23885_risc_vbibuffer(struct pci_dev
*pci
, struct cx23885_riscmem
*risc
,
1276 struct scatterlist
*sglist
, unsigned int top_offset
,
1277 unsigned int bottom_offset
, unsigned int bpl
,
1278 unsigned int padding
, unsigned int lines
)
1280 u32 instructions
, fields
;
1284 if (UNSET
!= top_offset
)
1286 if (UNSET
!= bottom_offset
)
1289 /* estimate risc mem: worst case is one write per page border +
1290 one write per scan line + syncs + jump (all 2 dwords). Padding
1291 can cause next bpl to start close to a page border. First DMA
1292 region may be smaller than PAGE_SIZE */
1293 /* write and jump need and extra dword */
1294 instructions
= fields
* (1 + ((bpl
+ padding
) * lines
)
1295 / PAGE_SIZE
+ lines
);
1297 risc
->size
= instructions
* 12;
1298 risc
->cpu
= dma_alloc_coherent(&pci
->dev
, risc
->size
, &risc
->dma
,
1300 if (risc
->cpu
== NULL
)
1302 /* write risc instructions */
1305 /* Sync to line 6, so US CC line 21 will appear in line '12'
1306 * in the userland vbi payload */
1307 if (UNSET
!= top_offset
)
1308 rp
= cx23885_risc_field(rp
, sglist
, top_offset
, 0,
1309 bpl
, padding
, lines
, 0, true);
1311 if (UNSET
!= bottom_offset
)
1312 rp
= cx23885_risc_field(rp
, sglist
, bottom_offset
, 0x200,
1313 bpl
, padding
, lines
, 0, UNSET
== top_offset
);
1317 /* save pointer to jmp instruction address */
1319 BUG_ON((risc
->jmp
- risc
->cpu
+ 2) * sizeof(*risc
->cpu
) > risc
->size
);
1324 void cx23885_free_buffer(struct cx23885_dev
*dev
, struct cx23885_buffer
*buf
)
1326 struct cx23885_riscmem
*risc
= &buf
->risc
;
1329 dma_free_coherent(&dev
->pci
->dev
, risc
->size
, risc
->cpu
, risc
->dma
);
1330 memset(risc
, 0, sizeof(*risc
));
1333 static void cx23885_tsport_reg_dump(struct cx23885_tsport
*port
)
1335 struct cx23885_dev
*dev
= port
->dev
;
1337 dprintk(1, "%s() Register Dump\n", __func__
);
1338 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__
,
1339 cx_read(DEV_CNTRL2
));
1340 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__
,
1341 cx23885_irq_get_mask(dev
));
1342 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__
,
1343 cx_read(AUDIO_INT_INT_MSK
));
1344 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__
,
1345 cx_read(AUD_INT_DMA_CTL
));
1346 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__
,
1347 cx_read(AUDIO_EXT_INT_MSK
));
1348 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__
,
1349 cx_read(AUD_EXT_DMA_CTL
));
1350 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__
,
1352 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__
,
1353 cx_read(ALT_PIN_OUT_SEL
));
1354 dprintk(1, "%s() GPIO2 0x%08X\n", __func__
,
1356 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__
,
1357 port
->reg_gpcnt
, cx_read(port
->reg_gpcnt
));
1358 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__
,
1359 port
->reg_gpcnt_ctl
, cx_read(port
->reg_gpcnt_ctl
));
1360 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__
,
1361 port
->reg_dma_ctl
, cx_read(port
->reg_dma_ctl
));
1362 if (port
->reg_src_sel
)
1363 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__
,
1364 port
->reg_src_sel
, cx_read(port
->reg_src_sel
));
1365 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__
,
1366 port
->reg_lngth
, cx_read(port
->reg_lngth
));
1367 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__
,
1368 port
->reg_hw_sop_ctrl
, cx_read(port
->reg_hw_sop_ctrl
));
1369 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__
,
1370 port
->reg_gen_ctrl
, cx_read(port
->reg_gen_ctrl
));
1371 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__
,
1372 port
->reg_bd_pkt_status
, cx_read(port
->reg_bd_pkt_status
));
1373 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__
,
1374 port
->reg_sop_status
, cx_read(port
->reg_sop_status
));
1375 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__
,
1376 port
->reg_fifo_ovfl_stat
, cx_read(port
->reg_fifo_ovfl_stat
));
1377 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__
,
1378 port
->reg_vld_misc
, cx_read(port
->reg_vld_misc
));
1379 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__
,
1380 port
->reg_ts_clk_en
, cx_read(port
->reg_ts_clk_en
));
1381 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__
,
1382 port
->reg_ts_int_msk
, cx_read(port
->reg_ts_int_msk
));
1383 dprintk(1, "%s() ts_int_status(0x%08X) 0x%08x\n", __func__
,
1384 port
->reg_ts_int_stat
, cx_read(port
->reg_ts_int_stat
));
1385 dprintk(1, "%s() PCI_INT_STAT 0x%08X\n", __func__
,
1386 cx_read(PCI_INT_STAT
));
1387 dprintk(1, "%s() VID_B_INT_MSTAT 0x%08X\n", __func__
,
1388 cx_read(VID_B_INT_MSTAT
));
1389 dprintk(1, "%s() VID_B_INT_SSTAT 0x%08X\n", __func__
,
1390 cx_read(VID_B_INT_SSTAT
));
1391 dprintk(1, "%s() VID_C_INT_MSTAT 0x%08X\n", __func__
,
1392 cx_read(VID_C_INT_MSTAT
));
1393 dprintk(1, "%s() VID_C_INT_SSTAT 0x%08X\n", __func__
,
1394 cx_read(VID_C_INT_SSTAT
));
1397 int cx23885_start_dma(struct cx23885_tsport
*port
,
1398 struct cx23885_dmaqueue
*q
,
1399 struct cx23885_buffer
*buf
)
1401 struct cx23885_dev
*dev
= port
->dev
;
1404 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__
,
1405 dev
->width
, dev
->height
, dev
->field
);
1407 /* clear dma in progress */
1408 cx23885_clear_bridge_error(dev
);
1410 /* Stop the fifo and risc engine for this port */
1411 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1413 /* setup fifo + format */
1414 cx23885_sram_channel_setup(dev
,
1415 &dev
->sram_channels
[port
->sram_chno
],
1416 port
->ts_packet_size
, buf
->risc
.dma
);
1418 cx23885_sram_channel_dump(dev
,
1419 &dev
->sram_channels
[port
->sram_chno
]);
1420 cx23885_risc_disasm(port
, &buf
->risc
);
1423 /* write TS length to chip */
1424 cx_write(port
->reg_lngth
, port
->ts_packet_size
);
1426 if ((!(cx23885_boards
[dev
->board
].portb
& CX23885_MPEG_DVB
)) &&
1427 (!(cx23885_boards
[dev
->board
].portc
& CX23885_MPEG_DVB
))) {
1428 pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1430 cx23885_boards
[dev
->board
].portb
,
1431 cx23885_boards
[dev
->board
].portc
);
1435 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1436 cx23885_av_clk(dev
, 0);
1440 /* If the port supports SRC SELECT, configure it */
1441 if (port
->reg_src_sel
)
1442 cx_write(port
->reg_src_sel
, port
->src_sel_val
);
1444 cx_write(port
->reg_hw_sop_ctrl
, port
->hw_sop_ctrl_val
);
1445 cx_write(port
->reg_ts_clk_en
, port
->ts_clk_en_val
);
1446 cx_write(port
->reg_vld_misc
, port
->vld_misc_val
);
1447 cx_write(port
->reg_gen_ctrl
, port
->gen_ctrl_val
);
1450 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1451 /* reset counter to zero */
1452 cx_write(port
->reg_gpcnt_ctl
, 3);
1455 /* Set VIDB pins to input */
1456 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
) {
1457 reg
= cx_read(PAD_CTRL
);
1458 reg
&= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1459 cx_write(PAD_CTRL
, reg
);
1462 /* Set VIDC pins to input */
1463 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
) {
1464 reg
= cx_read(PAD_CTRL
);
1465 reg
&= ~0x4; /* Clear TS2_SOP_OE */
1466 cx_write(PAD_CTRL
, reg
);
1469 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
) {
1471 reg
= cx_read(PAD_CTRL
);
1472 reg
= reg
& ~0x1; /* Clear TS1_OE */
1474 /* FIXME, bit 2 writing here is questionable */
1475 /* set TS1_SOP_OE and TS1_OE_HI */
1477 cx_write(PAD_CTRL
, reg
);
1479 /* Sets MOE_CLK_DIS to disable MoE clock */
1480 /* sets MCLK_DLY_SEL/BCLK_DLY_SEL to 1 buffer delay each */
1481 cx_write(CLK_DELAY
, cx_read(CLK_DELAY
) | 0x80000011);
1483 /* ALT_GPIO_ALT_SET: GPIO[0]
1484 * IR_ALT_TX_SEL: GPIO[1]
1485 * GPIO1_ALT_SEL: VIP_656_DATA[0]
1486 * GPIO0_ALT_SEL: VIP_656_CLK
1488 cx_write(ALT_PIN_OUT_SEL
, 0x10100045);
1491 switch (dev
->bridge
) {
1492 case CX23885_BRIDGE_885
:
1493 case CX23885_BRIDGE_887
:
1494 case CX23885_BRIDGE_888
:
1496 dprintk(1, "%s() enabling TS int's and DMA\n", __func__
);
1497 /* clear dma in progress */
1498 cx23885_clear_bridge_error(dev
);
1499 cx_set(port
->reg_ts_int_msk
, port
->ts_int_msk_val
);
1500 cx_set(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1502 /* clear dma in progress */
1503 cx23885_clear_bridge_error(dev
);
1504 cx23885_irq_add(dev
, port
->pci_irqmask
);
1505 cx23885_irq_enable_all(dev
);
1507 /* clear dma in progress */
1508 cx23885_clear_bridge_error(dev
);
1514 cx_set(DEV_CNTRL2
, (1<<5)); /* Enable RISC controller */
1515 /* clear dma in progress */
1516 cx23885_clear_bridge_error(dev
);
1518 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1519 cx23885_av_clk(dev
, 1);
1522 cx23885_tsport_reg_dump(port
);
1524 cx23885_irq_get_mask(dev
);
1526 /* clear dma in progress */
1527 cx23885_clear_bridge_error(dev
);
1532 static int cx23885_stop_dma(struct cx23885_tsport
*port
)
1534 struct cx23885_dev
*dev
= port
->dev
;
1540 dprintk(1, "%s()\n", __func__
);
1542 /* Stop interrupts and DMA */
1543 cx_clear(port
->reg_ts_int_msk
, port
->ts_int_msk_val
);
1544 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1545 /* just in case wait for any dma to complete before allowing dealloc */
1547 for (delay
= 0; delay
< 100; delay
++) {
1548 reg1_val
= cx_read(TC_REQ
);
1549 reg2_val
= cx_read(TC_REQ_SET
);
1550 if (reg1_val
== 0 || reg2_val
== 0)
1554 dev_dbg(&dev
->pci
->dev
, "delay=%d reg1=0x%08x reg2=0x%08x\n",
1555 delay
, reg1_val
, reg2_val
);
1557 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
) {
1558 reg
= cx_read(PAD_CTRL
);
1563 /* clear TS1_SOP_OE and TS1_OE_HI */
1565 cx_write(PAD_CTRL
, reg
);
1566 cx_write(port
->reg_src_sel
, 0);
1567 cx_write(port
->reg_gen_ctrl
, 8);
1570 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1571 cx23885_av_clk(dev
, 0);
1576 /* ------------------------------------------------------------------ */
1578 int cx23885_buf_prepare(struct cx23885_buffer
*buf
, struct cx23885_tsport
*port
)
1580 struct cx23885_dev
*dev
= port
->dev
;
1581 int size
= port
->ts_packet_size
* port
->ts_packet_count
;
1582 struct sg_table
*sgt
= vb2_dma_sg_plane_desc(&buf
->vb
.vb2_buf
, 0);
1584 dprintk(1, "%s: %p\n", __func__
, buf
);
1585 if (vb2_plane_size(&buf
->vb
.vb2_buf
, 0) < size
)
1587 vb2_set_plane_payload(&buf
->vb
.vb2_buf
, 0, size
);
1589 cx23885_risc_databuffer(dev
->pci
, &buf
->risc
,
1591 port
->ts_packet_size
, port
->ts_packet_count
, 0);
1596 * The risc program for each buffer works as follows: it starts with a simple
1597 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1598 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1599 * the initial JUMP).
1601 * This is the risc program of the first buffer to be queued if the active list
1602 * is empty and it just keeps DMAing this buffer without generating any
1605 * If a new buffer is added then the initial JUMP in the code for that buffer
1606 * will generate an interrupt which signals that the previous buffer has been
1607 * DMAed successfully and that it can be returned to userspace.
1609 * It also sets the final jump of the previous buffer to the start of the new
1610 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1611 * atomic u32 write, so there is no race condition.
 * The end-result of all this is that you only get an interrupt when a buffer
1614 * is ready, so the control flow is very easy.
1616 void cx23885_buf_queue(struct cx23885_tsport
*port
, struct cx23885_buffer
*buf
)
1618 struct cx23885_buffer
*prev
;
1619 struct cx23885_dev
*dev
= port
->dev
;
1620 struct cx23885_dmaqueue
*cx88q
= &port
->mpegq
;
1621 unsigned long flags
;
1623 buf
->risc
.cpu
[1] = cpu_to_le32(buf
->risc
.dma
+ 12);
1624 buf
->risc
.jmp
[0] = cpu_to_le32(RISC_JUMP
| RISC_CNT_INC
);
1625 buf
->risc
.jmp
[1] = cpu_to_le32(buf
->risc
.dma
+ 12);
1626 buf
->risc
.jmp
[2] = cpu_to_le32(0); /* bits 63-32 */
1628 spin_lock_irqsave(&dev
->slock
, flags
);
1629 if (list_empty(&cx88q
->active
)) {
1630 list_add_tail(&buf
->queue
, &cx88q
->active
);
1631 dprintk(1, "[%p/%d] %s - first active\n",
1632 buf
, buf
->vb
.vb2_buf
.index
, __func__
);
1634 buf
->risc
.cpu
[0] |= cpu_to_le32(RISC_IRQ1
);
1635 prev
= list_entry(cx88q
->active
.prev
, struct cx23885_buffer
,
1637 list_add_tail(&buf
->queue
, &cx88q
->active
);
1638 prev
->risc
.jmp
[1] = cpu_to_le32(buf
->risc
.dma
);
1639 dprintk(1, "[%p/%d] %s - append to active\n",
1640 buf
, buf
->vb
.vb2_buf
.index
, __func__
);
1642 spin_unlock_irqrestore(&dev
->slock
, flags
);
1645 /* ----------------------------------------------------------- */
1647 static void do_cancel_buffers(struct cx23885_tsport
*port
, char *reason
)
1649 struct cx23885_dmaqueue
*q
= &port
->mpegq
;
1650 struct cx23885_buffer
*buf
;
1651 unsigned long flags
;
1653 spin_lock_irqsave(&port
->slock
, flags
);
1654 while (!list_empty(&q
->active
)) {
1655 buf
= list_entry(q
->active
.next
, struct cx23885_buffer
,
1657 list_del(&buf
->queue
);
1658 vb2_buffer_done(&buf
->vb
.vb2_buf
, VB2_BUF_STATE_ERROR
);
1659 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1660 buf
, buf
->vb
.vb2_buf
.index
, reason
,
1661 (unsigned long)buf
->risc
.dma
);
1663 spin_unlock_irqrestore(&port
->slock
, flags
);
/* Stop DMA on the port, then fail all still-queued buffers. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1673 int cx23885_irq_417(struct cx23885_dev
*dev
, u32 status
)
1675 /* FIXME: port1 assumption here. */
1676 struct cx23885_tsport
*port
= &dev
->ts1
;
1683 count
= cx_read(port
->reg_gpcnt
);
1684 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1685 status
, cx_read(port
->reg_ts_int_msk
), count
);
1687 if ((status
& VID_B_MSK_BAD_PKT
) ||
1688 (status
& VID_B_MSK_OPC_ERR
) ||
1689 (status
& VID_B_MSK_VBI_OPC_ERR
) ||
1690 (status
& VID_B_MSK_SYNC
) ||
1691 (status
& VID_B_MSK_VBI_SYNC
) ||
1692 (status
& VID_B_MSK_OF
) ||
1693 (status
& VID_B_MSK_VBI_OF
)) {
1694 pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
1696 if (status
& VID_B_MSK_BAD_PKT
)
1697 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1698 if (status
& VID_B_MSK_OPC_ERR
)
1699 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1700 if (status
& VID_B_MSK_VBI_OPC_ERR
)
1701 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1702 if (status
& VID_B_MSK_SYNC
)
1703 dprintk(1, " VID_B_MSK_SYNC\n");
1704 if (status
& VID_B_MSK_VBI_SYNC
)
1705 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1706 if (status
& VID_B_MSK_OF
)
1707 dprintk(1, " VID_B_MSK_OF\n");
1708 if (status
& VID_B_MSK_VBI_OF
)
1709 dprintk(1, " VID_B_MSK_VBI_OF\n");
1711 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1712 cx23885_sram_channel_dump(dev
,
1713 &dev
->sram_channels
[port
->sram_chno
]);
1714 cx23885_417_check_encoder(dev
);
1715 } else if (status
& VID_B_MSK_RISCI1
) {
1716 dprintk(7, " VID_B_MSK_RISCI1\n");
1717 spin_lock(&port
->slock
);
1718 cx23885_wakeup(port
, &port
->mpegq
, count
);
1719 spin_unlock(&port
->slock
);
1722 cx_write(port
->reg_ts_int_stat
, status
);
1729 static int cx23885_irq_ts(struct cx23885_tsport
*port
, u32 status
)
1731 struct cx23885_dev
*dev
= port
->dev
;
1735 if ((status
& VID_BC_MSK_OPC_ERR
) ||
1736 (status
& VID_BC_MSK_BAD_PKT
) ||
1737 (status
& VID_BC_MSK_SYNC
) ||
1738 (status
& VID_BC_MSK_OF
)) {
1740 if (status
& VID_BC_MSK_OPC_ERR
)
1741 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1742 VID_BC_MSK_OPC_ERR
);
1744 if (status
& VID_BC_MSK_BAD_PKT
)
1745 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1746 VID_BC_MSK_BAD_PKT
);
1748 if (status
& VID_BC_MSK_SYNC
)
1749 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1752 if (status
& VID_BC_MSK_OF
)
1753 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1756 pr_err("%s: mpeg risc op code error\n", dev
->name
);
1758 cx_clear(port
->reg_dma_ctl
, port
->dma_ctl_val
);
1759 cx23885_sram_channel_dump(dev
,
1760 &dev
->sram_channels
[port
->sram_chno
]);
1762 } else if (status
& VID_BC_MSK_RISCI1
) {
1764 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1
);
1766 spin_lock(&port
->slock
);
1767 count
= cx_read(port
->reg_gpcnt
);
1768 cx23885_wakeup(port
, &port
->mpegq
, count
);
1769 spin_unlock(&port
->slock
);
1773 cx_write(port
->reg_ts_int_stat
, status
);
1780 static irqreturn_t
cx23885_irq(int irq
, void *dev_id
)
1782 struct cx23885_dev
*dev
= dev_id
;
1783 struct cx23885_tsport
*ts1
= &dev
->ts1
;
1784 struct cx23885_tsport
*ts2
= &dev
->ts2
;
1785 u32 pci_status
, pci_mask
;
1786 u32 vida_status
, vida_mask
;
1787 u32 audint_status
, audint_mask
;
1788 u32 ts1_status
, ts1_mask
;
1789 u32 ts2_status
, ts2_mask
;
1790 int vida_count
= 0, ts1_count
= 0, ts2_count
= 0, handled
= 0;
1791 int audint_count
= 0;
1792 bool subdev_handled
;
1794 pci_status
= cx_read(PCI_INT_STAT
);
1795 pci_mask
= cx23885_irq_get_mask(dev
);
1796 if ((pci_status
& pci_mask
) == 0) {
1797 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1798 pci_status
, pci_mask
);
1802 vida_status
= cx_read(VID_A_INT_STAT
);
1803 vida_mask
= cx_read(VID_A_INT_MSK
);
1804 audint_status
= cx_read(AUDIO_INT_INT_STAT
);
1805 audint_mask
= cx_read(AUDIO_INT_INT_MSK
);
1806 ts1_status
= cx_read(VID_B_INT_STAT
);
1807 ts1_mask
= cx_read(VID_B_INT_MSK
);
1808 ts2_status
= cx_read(VID_C_INT_STAT
);
1809 ts2_mask
= cx_read(VID_C_INT_MSK
);
1811 if (((pci_status
& pci_mask
) == 0) &&
1812 ((ts2_status
& ts2_mask
) == 0) &&
1813 ((ts1_status
& ts1_mask
) == 0))
1816 vida_count
= cx_read(VID_A_GPCNT
);
1817 audint_count
= cx_read(AUD_INT_A_GPCNT
);
1818 ts1_count
= cx_read(ts1
->reg_gpcnt
);
1819 ts2_count
= cx_read(ts2
->reg_gpcnt
);
1820 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1821 pci_status
, pci_mask
);
1822 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1823 vida_status
, vida_mask
, vida_count
);
1824 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1825 audint_status
, audint_mask
, audint_count
);
1826 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1827 ts1_status
, ts1_mask
, ts1_count
);
1828 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1829 ts2_status
, ts2_mask
, ts2_count
);
1831 if (pci_status
& (PCI_MSK_RISC_RD
| PCI_MSK_RISC_WR
|
1832 PCI_MSK_AL_RD
| PCI_MSK_AL_WR
| PCI_MSK_APB_DMA
|
1833 PCI_MSK_VID_C
| PCI_MSK_VID_B
| PCI_MSK_VID_A
|
1834 PCI_MSK_AUD_INT
| PCI_MSK_AUD_EXT
|
1835 PCI_MSK_GPIO0
| PCI_MSK_GPIO1
|
1836 PCI_MSK_AV_CORE
| PCI_MSK_IR
)) {
1838 if (pci_status
& PCI_MSK_RISC_RD
)
1839 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1842 if (pci_status
& PCI_MSK_RISC_WR
)
1843 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1846 if (pci_status
& PCI_MSK_AL_RD
)
1847 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1850 if (pci_status
& PCI_MSK_AL_WR
)
1851 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1854 if (pci_status
& PCI_MSK_APB_DMA
)
1855 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1858 if (pci_status
& PCI_MSK_VID_C
)
1859 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1862 if (pci_status
& PCI_MSK_VID_B
)
1863 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1866 if (pci_status
& PCI_MSK_VID_A
)
1867 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1870 if (pci_status
& PCI_MSK_AUD_INT
)
1871 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1874 if (pci_status
& PCI_MSK_AUD_EXT
)
1875 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1878 if (pci_status
& PCI_MSK_GPIO0
)
1879 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1882 if (pci_status
& PCI_MSK_GPIO1
)
1883 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1886 if (pci_status
& PCI_MSK_AV_CORE
)
1887 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1890 if (pci_status
& PCI_MSK_IR
)
1891 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1895 if (cx23885_boards
[dev
->board
].ci_type
== 1 &&
1896 (pci_status
& (PCI_MSK_GPIO1
| PCI_MSK_GPIO0
)))
1897 handled
+= netup_ci_slot_status(dev
, pci_status
);
1899 if (cx23885_boards
[dev
->board
].ci_type
== 2 &&
1900 (pci_status
& PCI_MSK_GPIO0
))
1901 handled
+= altera_ci_irq(dev
);
1904 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_DVB
)
1905 handled
+= cx23885_irq_ts(ts1
, ts1_status
);
1907 if (cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
)
1908 handled
+= cx23885_irq_417(dev
, ts1_status
);
1912 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_DVB
)
1913 handled
+= cx23885_irq_ts(ts2
, ts2_status
);
1915 if (cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
)
1916 handled
+= cx23885_irq_417(dev
, ts2_status
);
1920 handled
+= cx23885_video_irq(dev
, vida_status
);
1923 handled
+= cx23885_audio_irq(dev
, audint_status
, audint_mask
);
1925 if (pci_status
& PCI_MSK_IR
) {
1926 subdev_handled
= false;
1927 v4l2_subdev_call(dev
->sd_ir
, core
, interrupt_service_routine
,
1928 pci_status
, &subdev_handled
);
1933 if ((pci_status
& pci_mask
) & PCI_MSK_AV_CORE
) {
1934 cx23885_irq_disable(dev
, PCI_MSK_AV_CORE
);
1935 schedule_work(&dev
->cx25840_work
);
1940 cx_write(PCI_INT_STAT
, pci_status
& pci_mask
);
1942 return IRQ_RETVAL(handled
);
1945 static void cx23885_v4l2_dev_notify(struct v4l2_subdev
*sd
,
1946 unsigned int notification
, void *arg
)
1948 struct cx23885_dev
*dev
;
1953 dev
= to_cx23885(sd
->v4l2_dev
);
1955 switch (notification
) {
1956 case V4L2_SUBDEV_IR_RX_NOTIFY
: /* Possibly called in an IRQ context */
1957 if (sd
== dev
->sd_ir
)
1958 cx23885_ir_rx_v4l2_dev_notify(sd
, *(u32
*)arg
);
1960 case V4L2_SUBDEV_IR_TX_NOTIFY
: /* Possibly called in an IRQ context */
1961 if (sd
== dev
->sd_ir
)
1962 cx23885_ir_tx_v4l2_dev_notify(sd
, *(u32
*)arg
);
1967 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev
*dev
)
1969 INIT_WORK(&dev
->cx25840_work
, cx23885_av_work_handler
);
1970 INIT_WORK(&dev
->ir_rx_work
, cx23885_ir_rx_work_handler
);
1971 INIT_WORK(&dev
->ir_tx_work
, cx23885_ir_tx_work_handler
);
1972 dev
->v4l2_dev
.notify
= cx23885_v4l2_dev_notify
;
1975 static inline int encoder_on_portb(struct cx23885_dev
*dev
)
1977 return cx23885_boards
[dev
->board
].portb
== CX23885_MPEG_ENCODER
;
1980 static inline int encoder_on_portc(struct cx23885_dev
*dev
)
1982 return cx23885_boards
[dev
->board
].portc
== CX23885_MPEG_ENCODER
;
/* Mask represents 32 different GPIOs, GPIOs are split into multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder (with its own GPIOs) is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 through  0 - On the cx23885 bridge
 * GPIO 18 through  3 - On the cx23417 host bus interface
 * GPIO 23 through 19 - On the cx25840 a/v core
 */
1997 void cx23885_gpio_set(struct cx23885_dev
*dev
, u32 mask
)
2000 cx_set(GP0_IO
, mask
& 0x7);
2002 if (mask
& 0x0007fff8) {
2003 if (encoder_on_portb(dev
) || encoder_on_portc(dev
))
2004 pr_err("%s: Setting GPIO on encoder ports\n",
2006 cx_set(MC417_RWD
, (mask
& 0x0007fff8) >> 3);
2010 if (mask
& 0x00f80000)
2011 pr_info("%s: Unsupported\n", dev
->name
);
2014 void cx23885_gpio_clear(struct cx23885_dev
*dev
, u32 mask
)
2016 if (mask
& 0x00000007)
2017 cx_clear(GP0_IO
, mask
& 0x7);
2019 if (mask
& 0x0007fff8) {
2020 if (encoder_on_portb(dev
) || encoder_on_portc(dev
))
2021 pr_err("%s: Clearing GPIO moving on encoder ports\n",
2023 cx_clear(MC417_RWD
, (mask
& 0x7fff8) >> 3);
2027 if (mask
& 0x00f80000)
2028 pr_info("%s: Unsupported\n", dev
->name
);
2031 u32
cx23885_gpio_get(struct cx23885_dev
*dev
, u32 mask
)
2033 if (mask
& 0x00000007)
2034 return (cx_read(GP0_IO
) >> 8) & mask
& 0x7;
2036 if (mask
& 0x0007fff8) {
2037 if (encoder_on_portb(dev
) || encoder_on_portc(dev
))
2038 pr_err("%s: Reading GPIO moving on encoder ports\n",
2040 return (cx_read(MC417_RWD
) & ((mask
& 0x7fff8) >> 3)) << 3;
2044 if (mask
& 0x00f80000)
2045 pr_info("%s: Unsupported\n", dev
->name
);
2050 void cx23885_gpio_enable(struct cx23885_dev
*dev
, u32 mask
, int asoutput
)
2052 if ((mask
& 0x00000007) && asoutput
)
2053 cx_set(GP0_IO
, (mask
& 0x7) << 16);
2054 else if ((mask
& 0x00000007) && !asoutput
)
2055 cx_clear(GP0_IO
, (mask
& 0x7) << 16);
2057 if (mask
& 0x0007fff8) {
2058 if (encoder_on_portb(dev
) || encoder_on_portc(dev
))
2059 pr_err("%s: Enabling GPIO on encoder ports\n",
2063 /* MC417_OEN is active low for output, write 1 for an input */
2064 if ((mask
& 0x0007fff8) && asoutput
)
2065 cx_clear(MC417_OEN
, (mask
& 0x7fff8) >> 3);
2067 else if ((mask
& 0x0007fff8) && !asoutput
)
2068 cx_set(MC417_OEN
, (mask
& 0x7fff8) >> 3);
2075 } const broken_dev_id
[] = {
2077 * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
2078 * 0x1451 is PCI ID for the IOMMU found on Ryzen
2080 { PCI_VENDOR_ID_AMD
, 0x1451 },
2081 /* According to sudo lspci -nn,
2082 * 0x1423 is the PCI ID for the IOMMU found on Kaveri
2084 { PCI_VENDOR_ID_AMD
, 0x1423 },
2085 /* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
2087 { PCI_VENDOR_ID_AMD
, 0x1481 },
2088 /* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
2090 { PCI_VENDOR_ID_AMD
, 0x1419 },
2091 /* 0x1631 is the PCI ID for the IOMMU found on Renoir/Cezanne
2093 { PCI_VENDOR_ID_AMD
, 0x1631 },
2094 /* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
2096 { PCI_VENDOR_ID_ATI
, 0x5a23 },
2099 static bool cx23885_does_need_dma_reset(void)
2102 struct pci_dev
*pdev
= NULL
;
2104 if (dma_reset_workaround
== 0)
2106 else if (dma_reset_workaround
== 2)
2109 for (i
= 0; i
< ARRAY_SIZE(broken_dev_id
); i
++) {
2110 pdev
= pci_get_device(broken_dev_id
[i
].vendor
,
2111 broken_dev_id
[i
].dev
, NULL
);
2120 static int cx23885_initdev(struct pci_dev
*pci_dev
,
2121 const struct pci_device_id
*pci_id
)
2123 struct cx23885_dev
*dev
;
2124 struct v4l2_ctrl_handler
*hdl
;
2127 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
2131 dev
->need_dma_reset
= cx23885_does_need_dma_reset();
2133 err
= v4l2_device_register(&pci_dev
->dev
, &dev
->v4l2_dev
);
2137 hdl
= &dev
->ctrl_handler
;
2138 v4l2_ctrl_handler_init(hdl
, 6);
2143 dev
->v4l2_dev
.ctrl_handler
= hdl
;
2145 /* Prepare to handle notifications from subdevices */
2146 cx23885_v4l2_dev_notify_init(dev
);
2150 if (pci_enable_device(pci_dev
)) {
2155 if (cx23885_dev_setup(dev
) < 0) {
2160 /* print pci info */
2161 dev
->pci_rev
= pci_dev
->revision
;
2162 pci_read_config_byte(pci_dev
, PCI_LATENCY_TIMER
, &dev
->pci_lat
);
2163 pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
2165 pci_name(pci_dev
), dev
->pci_rev
, pci_dev
->irq
,
2167 (unsigned long long)pci_resource_start(pci_dev
, 0));
2169 pci_set_master(pci_dev
);
2170 err
= dma_set_mask(&pci_dev
->dev
, 0xffffffff);
2172 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev
->name
);
2173 goto fail_dma_set_mask
;
2176 err
= request_irq(pci_dev
->irq
, cx23885_irq
,
2177 IRQF_SHARED
, dev
->name
, dev
);
2179 pr_err("%s: can't get IRQ %d\n",
2180 dev
->name
, pci_dev
->irq
);
2181 goto fail_dma_set_mask
;
2184 switch (dev
->board
) {
2185 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI
:
2186 cx23885_irq_add_enable(dev
, PCI_MSK_GPIO1
| PCI_MSK_GPIO0
);
2188 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF
:
2189 cx23885_irq_add_enable(dev
, PCI_MSK_GPIO0
);
2194 * The CX2388[58] IR controller can start firing interrupts when
2195 * enabled, so these have to take place after the cx23885_irq() handler
2196 * is hooked up by the call to request_irq() above.
2198 cx23885_ir_pci_int_enable(dev
);
2199 cx23885_input_init(dev
);
2204 cx23885_dev_unregister(dev
);
2206 v4l2_ctrl_handler_free(hdl
);
2207 v4l2_device_unregister(&dev
->v4l2_dev
);
2213 static void cx23885_finidev(struct pci_dev
*pci_dev
)
2215 struct v4l2_device
*v4l2_dev
= pci_get_drvdata(pci_dev
);
2216 struct cx23885_dev
*dev
= to_cx23885(v4l2_dev
);
2218 cx23885_input_fini(dev
);
2219 cx23885_ir_fini(dev
);
2221 cx23885_shutdown(dev
);
2223 /* unregister stuff */
2224 free_irq(pci_dev
->irq
, dev
);
2226 pci_disable_device(pci_dev
);
2228 cx23885_dev_unregister(dev
);
2229 v4l2_ctrl_handler_free(&dev
->ctrl_handler
);
2230 v4l2_device_unregister(v4l2_dev
);
2234 static const struct pci_device_id cx23885_pci_tbl
[] = {
2239 .subvendor
= PCI_ANY_ID
,
2240 .subdevice
= PCI_ANY_ID
,
2245 .subvendor
= PCI_ANY_ID
,
2246 .subdevice
= PCI_ANY_ID
,
2248 /* --- end of list --- */
2251 MODULE_DEVICE_TABLE(pci
, cx23885_pci_tbl
);
2253 static struct pci_driver cx23885_pci_driver
= {
2255 .id_table
= cx23885_pci_tbl
,
2256 .probe
= cx23885_initdev
,
2257 .remove
= cx23885_finidev
,
2260 static int __init
cx23885_init(void)
2262 pr_info("cx23885 driver version %s loaded\n",
2264 return pci_register_driver(&cx23885_pci_driver
);
2267 static void __exit
cx23885_fini(void)
2269 pci_unregister_driver(&cx23885_pci_driver
);
2272 module_init(cx23885_init
);
2273 module_exit(cx23885_fini
);