// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Driver for the Conexant CX23885 PCIe bridge
 *
 *  Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
 */

#include "cx23885.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

static unsigned int vbibufs = 4;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");

static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");

#define dprintk(level, fmt, arg...)\
        do { if (vbi_debug >= level)\
                printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt), \
                        __func__, ##arg); \
        } while (0)
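
/*
 * Usage note (illustrative): dprintk(1, "%s()\n", __func__) expands to a
 * KERN_DEBUG printk that is emitted only while the vbi_debug module
 * parameter is >= 1; the macro prepends the caller's function name via
 * the leading "%s: vbi:" in the format.
 */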

/* ------------------------------------------------------------------ */

#define VBI_LINE_LENGTH 1440
#define VBI_NTSC_LINE_COUNT 12
#define VBI_PAL_LINE_COUNT 18

int cx23885_vbi_fmt(struct file *file, void *priv,
        struct v4l2_format *f)
{
        struct cx23885_dev *dev = video_drvdata(file);

        f->fmt.vbi.sampling_rate = 27000000;
        f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
        f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
        f->fmt.vbi.offset = 0;

        if (dev->tvnorm & V4L2_STD_525_60) {
                /* ntsc */
                f->fmt.vbi.start[0] = V4L2_VBI_ITU_525_F1_START + 9;
                f->fmt.vbi.start[1] = V4L2_VBI_ITU_525_F2_START + 9;
                f->fmt.vbi.count[0] = VBI_NTSC_LINE_COUNT;
                f->fmt.vbi.count[1] = VBI_NTSC_LINE_COUNT;
        } else if (dev->tvnorm & V4L2_STD_625_50) {
                /* pal */
                f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
                f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
                f->fmt.vbi.count[0] = VBI_PAL_LINE_COUNT;
                f->fmt.vbi.count[1] = VBI_PAL_LINE_COUNT;
        }

        return 0;
}
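
/*
 * Illustrative userspace view (assumption: this handler is wired to
 * VIDIOC_G_FMT on the vbi device node, as is usual for V4L2 drivers):
 *
 *      struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VBI_CAPTURE };
 *      ioctl(fd, VIDIOC_G_FMT, &fmt);
 *      // fmt.fmt.vbi.count[0] is now 12 (NTSC) or 18 (PAL) per field
 */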

/* We're given the Video Interrupt status register.
 * The cx23885_video_irq() func has already validated
 * the potential error bits, we just need to
 * deal with vbi payload and return indication if
 * we actually processed any payload.
 */
int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status)
{
        u32 count;
        int handled = 0;

        if (status & VID_BC_MSK_VBI_RISCI1) {
                dprintk(1, "%s() VID_BC_MSK_VBI_RISCI1\n", __func__);
                spin_lock(&dev->slock);
                count = cx_read(VBI_A_GPCNT);
                cx23885_video_wakeup(dev, &dev->vbiq, count);
                spin_unlock(&dev->slock);
                handled++;
        }

        return handled;
}
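
/*
 * Note (assumption about the hardware counter): VBI_A_GPCNT is bumped by
 * the RISC_CNT_INC jump in each buffer's program (see buffer_queue()
 * below), so its value at interrupt time tells cx23885_video_wakeup()
 * how far the DMA engine has advanced through the queued buffers.
 */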

static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
                         struct cx23885_dmaqueue *q,
                         struct cx23885_buffer *buf)
{
        dprintk(1, "%s()\n", __func__);

        /* setup fifo + format */
        cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02],
                                VBI_LINE_LENGTH, buf->risc.dma);

        /* reset counter */
        cx_write(VID_A_VBI_CTRL, 3);
        cx_write(VBI_A_GPCNT_CTL, 3);
        q->count = 0;

        /* enable irq */
        cx23885_irq_add_enable(dev, 0x01);
        cx_set(VID_A_INT_MSK, 0x000022);

        /* start dma */
        cx_set(DEV_CNTRL2, (1 << 5));
        cx_set(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */

        return 0;
}
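
/*
 * Register notes (assumptions, inferred from the comments above): bit 5
 * of DEV_CNTRL2 kicks the on-chip RISC engine itself, while the 0x22
 * written to VID_A_DMA_CTL enables the VBI FIFO and RISC instruction
 * fetch for this channel; cx23885_stop_streaming() clears the same bits.
 */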

/* ------------------------------------------------------------------ */

static int queue_setup(struct vb2_queue *q,
                           unsigned int *num_buffers, unsigned int *num_planes,
                           unsigned int sizes[], struct device *alloc_devs[])
{
        struct cx23885_dev *dev = q->drv_priv;
        unsigned lines = VBI_PAL_LINE_COUNT;

        if (dev->tvnorm & V4L2_STD_525_60)
                lines = VBI_NTSC_LINE_COUNT;
        *num_planes = 1;
        sizes[0] = lines * VBI_LINE_LENGTH * 2;
        return 0;
}
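
/*
 * The "* 2" covers both interlaced fields: cx23885_vbi_fmt() reports
 * 'lines' VBI lines in each of F1 and F2, so one buffer must hold
 * 2 * lines rows of VBI_LINE_LENGTH greyscale samples.
 */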

static int buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
        struct cx23885_buffer *buf = container_of(vbuf,
                struct cx23885_buffer, vb);
        struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
        unsigned lines = VBI_PAL_LINE_COUNT;

        if (dev->tvnorm & V4L2_STD_525_60)
                lines = VBI_NTSC_LINE_COUNT;

        if (vb2_plane_size(vb, 0) < lines * VBI_LINE_LENGTH * 2)
                return -EINVAL;
        vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);

        cx23885_risc_vbibuffer(dev->pci, &buf->risc,
                         sgt->sgl,
                         0, VBI_LINE_LENGTH * lines,
                         VBI_LINE_LENGTH, 0,
                         lines);
        return 0;
}
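
/*
 * Argument note (assumption, based on cx23885_risc_vbibuffer()'s
 * signature in cx23885-core.c): top_offset 0 places field 1 at the start
 * of the buffer and bottom_offset VBI_LINE_LENGTH * lines places field 2
 * directly after it, matching the payload size set above.
 */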

static void buffer_finish(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_buffer *buf = container_of(vbuf,
                struct cx23885_buffer, vb);

        cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
 * the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
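
/*
 * Illustrative word layout of one buffer's program, assuming the usual
 * 32-bit RISC words emitted by cx23885_risc_vbibuffer():
 *
 *      cpu[0]  RISC_JUMP (ORed with RISC_IRQ1 once a successor exists)
 *      cpu[1]  jump target, low 32 bits  (risc.dma + 12)
 *      cpu[2]  jump target, high 32 bits (0)
 *      ...     RISC_WRITE instructions that DMA the VBI lines ...
 *      jmp[0]  RISC_JUMP | RISC_CNT_INC
 *      jmp[1]  jump target, low 32 bits: initially risc.dma + 12,
 *              rewritten to the next buffer's risc.dma when one is queued
 *      jmp[2]  jump target, high 32 bits (0)
 *
 * buffer_queue() below fills in exactly these words.
 */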

static void buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
        struct cx23885_buffer *buf = container_of(vbuf,
                        struct cx23885_buffer, vb);
        struct cx23885_buffer *prev;
        struct cx23885_dmaqueue *q = &dev->vbiq;
        unsigned long flags;

        buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
        buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
        buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
        buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

        if (list_empty(&q->active)) {
                spin_lock_irqsave(&dev->slock, flags);
                list_add_tail(&buf->queue, &q->active);
                spin_unlock_irqrestore(&dev->slock, flags);
                dprintk(2, "[%p/%d] vbi_queue - first active\n",
                        buf, buf->vb.vb2_buf.index);
        } else {
                buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
                prev = list_entry(q->active.prev, struct cx23885_buffer,
                                  queue);
                spin_lock_irqsave(&dev->slock, flags);
                list_add_tail(&buf->queue, &q->active);
                spin_unlock_irqrestore(&dev->slock, flags);
                prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
                dprintk(2, "[%p/%d] buffer_queue - append to active\n",
                        buf, buf->vb.vb2_buf.index);
        }
}

static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
{
        struct cx23885_dev *dev = q->drv_priv;
        struct cx23885_dmaqueue *dmaq = &dev->vbiq;
        struct cx23885_buffer *buf = list_entry(dmaq->active.next,
                        struct cx23885_buffer, queue);

        cx23885_start_vbi_dma(dev, dmaq, buf);
        return 0;
}

static void cx23885_stop_streaming(struct vb2_queue *q)
{
        struct cx23885_dev *dev = q->drv_priv;
        struct cx23885_dmaqueue *dmaq = &dev->vbiq;
        unsigned long flags;

        cx_clear(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
        spin_lock_irqsave(&dev->slock, flags);
        while (!list_empty(&dmaq->active)) {
                struct cx23885_buffer *buf = list_entry(dmaq->active.next,
                        struct cx23885_buffer, queue);

                list_del(&buf->queue);
                vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
}

const struct vb2_ops cx23885_vbi_qops = {
        .queue_setup     = queue_setup,
        .buf_prepare     = buffer_prepare,
        .buf_finish      = buffer_finish,
        .buf_queue       = buffer_queue,
        .start_streaming = cx23885_start_streaming,
        .stop_streaming  = cx23885_stop_streaming,
};
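
/*
 * Wiring sketch (assumption: mirrors the vbi queue init in
 * cx23885-video.c): the ops table above is only useful once attached to
 * a vb2_queue, roughly as
 *
 *      q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
 *      q->ops = &cx23885_vbi_qops;
 *      q->mem_ops = &vb2_dma_sg_memops;
 *
 * the dma-sg mem_ops is what makes the vb2_dma_sg_plane_desc() call in
 * buffer_prepare() valid.
 */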