// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Intel Corporation
 *
 * Based partially on Intel IPU4 driver written by
 *  Sakari Ailus <sakari.ailus@linux.intel.com>
 *  Samu Onkalo <samu.onkalo@intel.com>
 *  Jouni Högander <jouni.hogander@intel.com>
 *  Jouni Ukkonen <jouni.ukkonen@intel.com>
 *  Antti Laakso <antti.laakso@intel.com>
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>

#include "ipu3-cio2.h"
struct ipu3_cio2_fmt {
	u32 mbus_code;
	u32 fourcc;
	u8 mipicode;
};
/*
 * These are raw formats used in Intel's third generation of
 * Image Processing Unit known as IPU3.
 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
 * last LSB 6 bits unused.
 */
static const struct ipu3_cio2_fmt formats[] = {
	{	/* put default entry at beginning */
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
		.mipicode	= 0x2b,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
		.mipicode	= 0x2b,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
		.mipicode	= 0x2b,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
		.mipicode	= 0x2b,
	},
};
/*
 * cio2_find_format - lookup color format by fourcc or/and media bus code
 * @pixelformat: fourcc to match, ignored if null
 * @mbus_code: media bus code to match, ignored if null
 */
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
						    const u32 *mbus_code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (pixelformat && *pixelformat != formats[i].fourcc)
			continue;
		if (mbus_code && *mbus_code != formats[i].mbus_code)
			continue;

		return &formats[i];
	}

	return NULL;
}
static inline u32 cio2_bytesperline(const unsigned int width)
{
	/*
	 * 64 bytes for every 50 pixels, the line length
	 * in bytes is multiple of 64 (line end alignment).
	 */
	return DIV_ROUND_UP(width, 50) * 64;
}
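/*
 * Worked example (added for illustration, not in the original source): for
 * the 1936-pixel default width used in cio2_queue_init() further below,
 * DIV_ROUND_UP(1936, 50) * 64 = 39 * 64 = 2496 bytes per line.
 */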
/**************** FBPT operations ****************/

static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
	if (cio2->dummy_lop) {
		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
				  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
		cio2->dummy_lop = NULL;
	}
	if (cio2->dummy_page) {
		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
				  cio2->dummy_page, cio2->dummy_page_bus_addr);
		cio2->dummy_page = NULL;
	}
}
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
	unsigned int i;

	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
					      CIO2_PAGE_SIZE,
					      &cio2->dummy_page_bus_addr,
					      GFP_KERNEL);
	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
					     CIO2_PAGE_SIZE,
					     &cio2->dummy_lop_bus_addr,
					     GFP_KERNEL);
	if (!cio2->dummy_page || !cio2->dummy_lop) {
		cio2_fbpt_exit_dummy(cio2);
		return -ENOMEM;
	}
	/*
	 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
	 * Initialize each entry to dummy_page bus base address.
	 */
	for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
		cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;

	return 0;
}
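/*
 * Sizing note (added, assuming the 4 KiB pages described above): one LOP
 * page holds 4096 / 4 = 1024 page pointers, so a single LOP can describe
 * up to 1024 * 4 KiB = 4 MiB of buffer memory.
 */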
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	/*
	 * The CPU first initializes some fields in fbpt, then sets
	 * the VALID bit, this barrier is to ensure that the DMA(device)
	 * does not see the VALID bit enabled before other fields are
	 * initialized; otherwise it could lead to havoc.
	 */
	dma_wmb();

	/*
	 * Request interrupts for start and completion
	 * Valid bit is applicable only to 1st entry
	 */
	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}

/* Initialize FBPT entries to point to dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
				       struct cio2_fbpt_entry
				       entry[CIO2_MAX_LOPS])
{
	unsigned int i;

	entry[0].first_entry.first_page_offset = 0;
	entry[1].second_entry.num_of_pages =
		CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
	entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;

	for (i = 0; i < CIO2_MAX_LOPS; i++)
		entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

	cio2_fbpt_entry_enable(cio2, entry);
}
/* Initialize FBPT entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
				     struct cio2_buffer *b,
				     struct cio2_fbpt_entry
				     entry[CIO2_MAX_LOPS])
{
	struct vb2_buffer *vb = &b->vbb.vb2_buf;
	unsigned int length = vb->planes[0].length;
	int remaining;
	int i = 0;

	entry[0].first_entry.first_page_offset = b->offset;
	remaining = length + entry[0].first_entry.first_page_offset;
	entry[1].second_entry.num_of_pages =
		DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
	/*
	 * last_page_available_bytes has the offset of the last byte in the
	 * last page which is still accessible by DMA. DMA cannot access
	 * beyond this point. Valid range for this is from 0 to 4095.
	 * 0 indicates 1st byte in the page is DMA accessible.
	 * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
	 * is available for DMA transfer.
	 */
	entry[1].second_entry.last_page_available_bytes =
			(remaining & ~PAGE_MASK) ?
				(remaining & ~PAGE_MASK) - 1 :
				CIO2_PAGE_SIZE - 1;

	remaining = length;
	while (remaining > 0) {
		entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
		remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
		entry++;
		i++;
	}

	/*
	 * The first not meaningful FBPT entry should point to a valid LOP
	 */
	entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

	cio2_fbpt_entry_enable(cio2, entry);
}
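/*
 * Worked example (added for illustration, assuming CIO2_PAGE_SIZE is 4096):
 * for a 2,735,616-byte buffer (2496 bytes/line * 1096 lines) with a zero
 * first_page_offset, num_of_pages = DIV_ROUND_UP(2735616, 4096) = 668 and
 * last_page_available_bytes = (2735616 & 4095) - 1 = 3583, i.e. DMA may
 * touch bytes 0..3583 of the final page.
 */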
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;

	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
				     GFP_KERNEL);
	if (!q->fbpt)
		return -ENOMEM;

	memset(q->fbpt, 0, CIO2_FBPT_SIZE);

	return 0;
}

static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}
/**************** CSI2 hardware setup ****************/

/*
 * The CSI2 receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *     register value = (A/1e9 + B * UI) / COUNT_ACC
 * where
 *     UI = 1 / (2 * F) in seconds
 *     COUNT_ACC = counter accuracy in seconds
 *     For IPU3 COUNT_ACC = 0.0625
 *
 * A and B are coefficients from the table below,
 * depending whether the register minimum or maximum value is
 * calculated.
 *                                     Minimum     Maximum
 *                                     A     B     A     B
 * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
 * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
 * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
 *
 * We use the minimum values of both A and B.
 */

/*
 * shift for keeping value range suitable for 32-bit integer arithmetics
 */
#define LIMIT_SHIFT	8
static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
	const u32 accinv = 16; /* invert of counter resolution */
	const u32 uiinv = 500000000; /* 1e9 / 2 */
	s32 r;

	freq >>= LIMIT_SHIFT;

	if (WARN_ON(freq <= 0 || freq > S32_MAX))
		return def;
	/*
	 * b could be 0, -2 or -8, so |accinv * b| is always
	 * less than (1 << ds) and thus |r| < 500000000.
	 */
	r = accinv * b * (uiinv >> LIMIT_SHIFT);
	r = r / (s32)freq;
	/* max value of a is 95 */
	r += accinv * a;

	return r;
}
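/*
 * Illustration (added, not from the original source): with the minimum
 * clock-lane settle coefficients (a = 95, b = -8) and an assumed 400 MHz
 * link frequency, the register value works out to
 *   16 * 95 + 16 * (-8) * (500000000 / 400000000) = 1520 - 160 = 1360.
 */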
/* Calculate the delay value for termination enable of clock lane HS Rx */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
				 struct cio2_csi2_timing *timing)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
	struct v4l2_ctrl *link_freq;
	int r;

	link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
	if (!link_freq) {
		dev_err(dev, "failed to find LINK_FREQ\n");
		return -EPIPE;
	}

	qm.index = v4l2_ctrl_g_ctrl(link_freq);
	r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
	if (r) {
		dev_err(dev, "failed to get menu item\n");
		return r;
	}

	if (!qm.value) {
		dev_err(dev, "error invalid link_freq\n");
		return -EINVAL;
	}

	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
					    qm.value,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
					    qm.value,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
					    qm.value,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
					    qm.value,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);

	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);

	return 0;
}
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const int NUM_VCS = 4;
	static const int SID;	/* Stream id */
	static const int ENTRY;
	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
						   CIO2_FBPT_SUBENTRY_UNIT);
	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
	const struct ipu3_cio2_fmt *fmt;
	void __iomem *const base = cio2->base;
	u8 lanes, csi2bus = q->csi2.port;
	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
	struct cio2_csi2_timing timing;
	int i, r;

	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
	if (!fmt)
		return -EINVAL;

	lanes = q->csi2.lanes;

	r = cio2_csi2_calc_timing(cio2, q, &timing);
	if (r)
		return r;

	writel(timing.clk_termen, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
	writel(timing.clk_settle, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));

	for (i = 0; i < lanes; i++) {
		writel(timing.dat_termen, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
		writel(timing.dat_settle, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
	}

	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
	       CIO2_PBM_WMCTRL1_MID1_2CK |
	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
	       CIO2_PBM_ARB_CTRL_LE_EN |
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
	       base + CIO2_REG_PBM_ARB_CTRL);
	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);

	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);

	/* Configure MIPI backend */
	for (i = 0; i < NUM_VCS; i++)
		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));

	/* There are 16 short packet LUT entries */
	for (i = 0; i < 16; i++)
		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);

	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);

	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
	       base + CIO2_REG_INT_EN);

	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));

	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
	writel(CIO2_CGC_PRIM_TGE |
	       CIO2_CGC_CSI2_INTERFRAME_TGE |
	       CIO2_CGC_CSI2_PORT_DCGE |
	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL01);
	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL23);

	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
		writel(0, base + CIO2_REG_CDMABA(i));
		writel(0, base + CIO2_REG_CDMAC0(i));
		writel(0, base + CIO2_REG_CDMAC1(i));
	}

	writel(q->fbpt_bus_addr >> PAGE_SHIFT,
	       base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
	       CIO2_CDMAC0_DMA_INTR_ON_FE |
	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
	       CIO2_CDMAC0_DMA_INTR_ON_FS |
	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));

	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));

	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);

	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));

	/* Clear interrupts */
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
	writel(~0, base + CIO2_REG_INT_STS);

	/* Enable devices, starting from the last device in the pipe */
	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);

	return 0;
}
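/*
 * Added note (illustration only; the real constants live in ipu3-cio2.h):
 * num_buffers1 is CIO2_MAX_BUFFERS - 1, which suggests the FBPT_LEN field
 * is programmed as "number of entries minus one", and FBPT_WIDTH is the
 * per-entry width in sub-entry units, e.g. DIV_ROUND_UP(32, 4) = 8 if
 * CIO2_MAX_LOPS were 32 and CIO2_FBPT_SUBENTRY_UNIT were 4.
 */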
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	void __iomem *base = cio2->base;
	unsigned int i, maxloops = 1000;

	/* Disable CSI receiver and MIPI backend devices */
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);

	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
	do {
		if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
		    CIO2_CDMAC0_DMA_HALTED)
			break;
		usleep_range(1000, 2000);
	} while (--maxloops);
	if (!maxloops)
		dev_err(&cio2->pci_dev->dev,
			"DMA %i can not be halted\n", CIO2_DMA_CHAN);

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
	}
}
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q = cio2->cur_queue;
	int buffers_found = 0;
	u64 ns = ktime_get_ns();

	if (dma_chan >= CIO2_QUEUES) {
		dev_err(dev, "bad DMA channel %i\n", dma_chan);
		return;
	}

	/* Find out which buffer(s) are ready */
	do {
		struct cio2_fbpt_entry *const entry =
			&q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
		struct cio2_buffer *b;

		if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
			break;

		b = q->bufs[q->bufs_first];
		if (b) {
			unsigned int bytes = entry[1].second_entry.num_of_bytes;

			q->bufs[q->bufs_first] = NULL;
			atomic_dec(&q->bufs_queued);
			dev_dbg(&cio2->pci_dev->dev,
				"buffer %i done\n", b->vbb.vb2_buf.index);

			b->vbb.vb2_buf.timestamp = ns;
			b->vbb.field = V4L2_FIELD_NONE;
			b->vbb.sequence = atomic_read(&q->frame_sequence);
			if (b->vbb.vb2_buf.planes[0].length != bytes)
				dev_warn(dev, "buffer length is %d received %d\n",
					 b->vbb.vb2_buf.planes[0].length,
					 bytes);
			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
		}
		atomic_inc(&q->frame_sequence);
		cio2_fbpt_entry_init_dummy(cio2, entry);
		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
		buffers_found++;
	} while (1);

	if (buffers_found == 0)
		dev_warn(&cio2->pci_dev->dev,
			 "no ready buffers found on DMA channel %u\n",
			 dma_chan);
}
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
	/*
	 * For the user space camera control algorithms it is essential
	 * to know when the reception of a frame has begun. That's often
	 * the best timing information to get from the hardware.
	 */
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
	};

	v4l2_event_queue(q->subdev.devnode, &event);
}

static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
};

static const char *const cio2_port_errs[] = {
	"DPHY not recoverable",
	"ECC not recoverable",
};
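/*
 * Note (added for clarity): cio2_irq_handle_once() below derives its error
 * masks as BIT_MASK(ARRAY_SIZE(<array>)) - 1, i.e. one mask bit per message
 * string; for an array of, say, 7 strings that is BIT(7) - 1 = 0x7f.
 */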
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;

	if (int_status & CIO2_INT_IOOE) {
		/*
		 * Interrupt on Output Error:
		 * 1) SRAM is full and FS received, or
		 * 2) An invalid bit detected by DMA.
		 */
		u32 oe_status, oe_clear;

		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
		oe_status = oe_clear;

		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
			dev_err(dev, "DMA output error: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
		}
		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
				>> CIO2_INT_EXT_OE_OES_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
		}
		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
		if (oe_status)
			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
				 oe_status);
		int_status &= ~CIO2_INT_IOOE;
	}

	if (int_status & CIO2_INT_IOC_MASK) {
		/* DMA IO done -- frame ready */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOC(d)) {
				clr |= CIO2_INT_IOC(d);
				cio2_buffer_done(cio2, d);
			}
		int_status &= ~clr;
	}

	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
		/* DMA IO starts or reached specified line */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOS_IOLN(d)) {
				clr |= CIO2_INT_IOS_IOLN(d);
				if (d == CIO2_DMA_CHAN)
					cio2_queue_event_sof(cio2,
							     cio2->cur_queue);
			}
		int_status &= ~clr;
	}

	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
		/* CSI2 receiver (error) interrupt */
		u32 ie_status, ie_clear;
		unsigned int port;

		ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
		ie_status = ie_clear;

		for (port = 0; port < CIO2_NUM_PORTS; port++) {
			u32 port_status = (ie_status >> (port * 8)) & 0xff;
			u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
			void __iomem *const csi_rx_base =
						base + CIO2_REG_PIPE_BASE(port);
			unsigned int i;

			while (port_status & err_mask) {
				i = ffs(port_status) - 1;
				dev_err(dev, "port %i error %s\n",
					port, cio2_port_errs[i]);
				ie_status &= ~BIT(port * 8 + i);
				port_status &= ~BIT(i);
			}

			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
				u32 csi2_status, csi2_clear;

				csi2_status = readl(csi_rx_base +
						CIO2_REG_IRQCTRL_STATUS);
				csi2_clear = csi2_status;
				err_mask =
					BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;

				while (csi2_status & err_mask) {
					i = ffs(csi2_status) - 1;
					dev_err(dev,
						"CSI-2 receiver port %i: %s\n",
						port, cio2_irq_errs[i]);
					csi2_status &= ~BIT(i);
				}

				writel(csi2_clear,
				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
				if (csi2_status)
					dev_warn(dev,
						 "unknown CSI2 error 0x%x on port %i\n",
						 csi2_status, port);

				ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
			}
		}

		writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
		if (ie_status)
			dev_warn(dev, "unknown interrupt 0x%x on IE\n",
				 ie_status);

		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
	}

	if (int_status)
		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
}
static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
	struct cio2_device *cio2 = cio2_ptr;
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;
	u32 int_status;

	int_status = readl(base + CIO2_REG_INT_STS);
	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
	if (!int_status)
		return IRQ_NONE;

	do {
		writel(int_status, base + CIO2_REG_INT_STS);
		cio2_irq_handle_once(cio2, int_status);
		int_status = readl(base + CIO2_REG_INT_STS);
		if (int_status)
			dev_dbg(dev, "pending status 0x%x\n", int_status);
	} while (int_status);

	return IRQ_HANDLED;
}
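/*
 * Note (added): the do/while loop above re-reads CIO2_REG_INT_STS after
 * each pass so that interrupt causes raised while the previous batch was
 * being handled are serviced before the handler returns IRQ_HANDLED.
 */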
792 /**************** Videobuf2 interface ****************/
794 static void cio2_vb2_return_all_buffers(struct cio2_queue
*q
,
795 enum vb2_buffer_state state
)
799 for (i
= 0; i
< CIO2_MAX_BUFFERS
; i
++) {
801 atomic_dec(&q
->bufs_queued
);
802 vb2_buffer_done(&q
->bufs
[i
]->vbb
.vb2_buf
,
808 static int cio2_vb2_queue_setup(struct vb2_queue
*vq
,
809 unsigned int *num_buffers
,
810 unsigned int *num_planes
,
811 unsigned int sizes
[],
812 struct device
*alloc_devs
[])
814 struct cio2_device
*cio2
= vb2_get_drv_priv(vq
);
815 struct cio2_queue
*q
= vb2q_to_cio2_queue(vq
);
818 *num_planes
= q
->format
.num_planes
;
820 for (i
= 0; i
< *num_planes
; ++i
) {
821 sizes
[i
] = q
->format
.plane_fmt
[i
].sizeimage
;
822 alloc_devs
[i
] = &cio2
->pci_dev
->dev
;
825 *num_buffers
= clamp_val(*num_buffers
, 1, CIO2_MAX_BUFFERS
);
827 /* Initialize buffer queue */
828 for (i
= 0; i
< CIO2_MAX_BUFFERS
; i
++) {
830 cio2_fbpt_entry_init_dummy(cio2
, &q
->fbpt
[i
* CIO2_MAX_LOPS
]);
832 atomic_set(&q
->bufs_queued
, 0);
839 /* Called after each buffer is allocated */
840 static int cio2_vb2_buf_init(struct vb2_buffer
*vb
)
842 struct cio2_device
*cio2
= vb2_get_drv_priv(vb
->vb2_queue
);
843 struct device
*dev
= &cio2
->pci_dev
->dev
;
844 struct cio2_buffer
*b
=
845 container_of(vb
, struct cio2_buffer
, vbb
.vb2_buf
);
846 static const unsigned int entries_per_page
=
847 CIO2_PAGE_SIZE
/ sizeof(u32
);
848 unsigned int pages
= DIV_ROUND_UP(vb
->planes
[0].length
, CIO2_PAGE_SIZE
);
849 unsigned int lops
= DIV_ROUND_UP(pages
+ 1, entries_per_page
);
851 struct sg_page_iter sg_iter
;
854 if (lops
<= 0 || lops
> CIO2_MAX_LOPS
) {
855 dev_err(dev
, "%s: bad buffer size (%i)\n", __func__
,
856 vb
->planes
[0].length
);
857 return -ENOSPC
; /* Should never happen */
860 memset(b
->lop
, 0, sizeof(b
->lop
));
861 /* Allocate LOP table */
862 for (i
= 0; i
< lops
; i
++) {
863 b
->lop
[i
] = dma_alloc_coherent(dev
, CIO2_PAGE_SIZE
,
864 &b
->lop_bus_addr
[i
], GFP_KERNEL
);
870 sg
= vb2_dma_sg_plane_desc(vb
, 0);
874 if (sg
->nents
&& sg
->sgl
)
875 b
->offset
= sg
->sgl
->offset
;
878 for_each_sg_page(sg
->sgl
, &sg_iter
, sg
->nents
, 0) {
881 b
->lop
[i
][j
] = sg_page_iter_dma_address(&sg_iter
) >> PAGE_SHIFT
;
883 if (j
== entries_per_page
) {
889 b
->lop
[i
][j
] = cio2
->dummy_page_bus_addr
>> PAGE_SHIFT
;
892 for (i
--; i
>= 0; i
--)
893 dma_free_coherent(dev
, CIO2_PAGE_SIZE
,
894 b
->lop
[i
], b
->lop_bus_addr
[i
]);
898 /* Transfer buffer ownership to cio2 */
899 static void cio2_vb2_buf_queue(struct vb2_buffer
*vb
)
901 struct cio2_device
*cio2
= vb2_get_drv_priv(vb
->vb2_queue
);
902 struct cio2_queue
*q
=
903 container_of(vb
->vb2_queue
, struct cio2_queue
, vbq
);
904 struct cio2_buffer
*b
=
905 container_of(vb
, struct cio2_buffer
, vbb
.vb2_buf
);
906 struct cio2_fbpt_entry
*entry
;
908 unsigned int i
, j
, next
= q
->bufs_next
;
909 int bufs_queued
= atomic_inc_return(&q
->bufs_queued
);
912 dev_dbg(&cio2
->pci_dev
->dev
, "queue buffer %d\n", vb
->index
);
915 * This code queues the buffer to the CIO2 DMA engine, which starts
916 * running once streaming has started. It is possible that this code
917 * gets pre-empted due to increased CPU load. Upon this, the driver
918 * does not get an opportunity to queue new buffers to the CIO2 DMA
919 * engine. When the DMA engine encounters an FBPT entry without the
920 * VALID bit set, the DMA engine halts, which requires a restart of
921 * the DMA engine and sensor, to continue streaming.
922 * This is not desired and is highly unlikely given that there are
923 * 32 FBPT entries that the DMA engine needs to process, to run into
924 * an FBPT entry, without the VALID bit set. We try to mitigate this
925 * by disabling interrupts for the duration of this queueing.
927 local_irq_save(flags
);
929 fbpt_rp
= (readl(cio2
->base
+ CIO2_REG_CDMARI(CIO2_DMA_CHAN
))
930 >> CIO2_CDMARI_FBPT_RP_SHIFT
)
931 & CIO2_CDMARI_FBPT_RP_MASK
;
934 * fbpt_rp is the fbpt entry that the dma is currently working
935 * on, but since it could jump to next entry at any time,
936 * assume that we might already be there.
938 fbpt_rp
= (fbpt_rp
+ 1) % CIO2_MAX_BUFFERS
;
940 if (bufs_queued
<= 1 || fbpt_rp
== next
)
941 /* Buffers were drained */
942 next
= (fbpt_rp
+ 1) % CIO2_MAX_BUFFERS
;
944 for (i
= 0; i
< CIO2_MAX_BUFFERS
; i
++) {
946 * We have allocated CIO2_MAX_BUFFERS circularly for the
947 * hw, the user has requested N buffer queue. The driver
948 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
949 * user queues a buffer, there necessarily is a free buffer.
951 if (!q
->bufs
[next
]) {
953 entry
= &q
->fbpt
[next
* CIO2_MAX_LOPS
];
954 cio2_fbpt_entry_init_buf(cio2
, b
, entry
);
955 local_irq_restore(flags
);
956 q
->bufs_next
= (next
+ 1) % CIO2_MAX_BUFFERS
;
957 for (j
= 0; j
< vb
->num_planes
; j
++)
958 vb2_set_plane_payload(vb
, j
,
959 q
->format
.plane_fmt
[j
].sizeimage
);
963 dev_dbg(&cio2
->pci_dev
->dev
, "entry %i was full!\n", next
);
964 next
= (next
+ 1) % CIO2_MAX_BUFFERS
;
967 local_irq_restore(flags
);
968 dev_err(&cio2
->pci_dev
->dev
, "error: all cio2 entries were full!\n");
969 atomic_dec(&q
->bufs_queued
);
970 vb2_buffer_done(vb
, VB2_BUF_STATE_ERROR
);
973 /* Called when each buffer is freed */
974 static void cio2_vb2_buf_cleanup(struct vb2_buffer
*vb
)
976 struct cio2_device
*cio2
= vb2_get_drv_priv(vb
->vb2_queue
);
977 struct cio2_buffer
*b
=
978 container_of(vb
, struct cio2_buffer
, vbb
.vb2_buf
);
982 for (i
= 0; i
< CIO2_MAX_LOPS
; i
++) {
984 dma_free_coherent(&cio2
->pci_dev
->dev
, CIO2_PAGE_SIZE
,
985 b
->lop
[i
], b
->lop_bus_addr
[i
]);
989 static int cio2_vb2_start_streaming(struct vb2_queue
*vq
, unsigned int count
)
991 struct cio2_queue
*q
= vb2q_to_cio2_queue(vq
);
992 struct cio2_device
*cio2
= vb2_get_drv_priv(vq
);
996 atomic_set(&q
->frame_sequence
, 0);
998 r
= pm_runtime_get_sync(&cio2
->pci_dev
->dev
);
1000 dev_info(&cio2
->pci_dev
->dev
, "failed to set power %d\n", r
);
1001 pm_runtime_put_noidle(&cio2
->pci_dev
->dev
);
1005 r
= media_pipeline_start(&q
->vdev
.entity
, &q
->pipe
);
1009 r
= cio2_hw_init(cio2
, q
);
1013 /* Start streaming on sensor */
1014 r
= v4l2_subdev_call(q
->sensor
, video
, s_stream
, 1);
1016 goto fail_csi2_subdev
;
1018 cio2
->streaming
= true;
1023 cio2_hw_exit(cio2
, q
);
1025 media_pipeline_stop(&q
->vdev
.entity
);
1027 dev_dbg(&cio2
->pci_dev
->dev
, "failed to start streaming (%d)\n", r
);
1028 cio2_vb2_return_all_buffers(q
, VB2_BUF_STATE_QUEUED
);
1029 pm_runtime_put(&cio2
->pci_dev
->dev
);
1034 static void cio2_vb2_stop_streaming(struct vb2_queue
*vq
)
1036 struct cio2_queue
*q
= vb2q_to_cio2_queue(vq
);
1037 struct cio2_device
*cio2
= vb2_get_drv_priv(vq
);
1039 if (v4l2_subdev_call(q
->sensor
, video
, s_stream
, 0))
1040 dev_err(&cio2
->pci_dev
->dev
,
1041 "failed to stop sensor streaming\n");
1043 cio2_hw_exit(cio2
, q
);
1044 synchronize_irq(cio2
->pci_dev
->irq
);
1045 cio2_vb2_return_all_buffers(q
, VB2_BUF_STATE_ERROR
);
1046 media_pipeline_stop(&q
->vdev
.entity
);
1047 pm_runtime_put(&cio2
->pci_dev
->dev
);
1048 cio2
->streaming
= false;
1051 static const struct vb2_ops cio2_vb2_ops
= {
1052 .buf_init
= cio2_vb2_buf_init
,
1053 .buf_queue
= cio2_vb2_buf_queue
,
1054 .buf_cleanup
= cio2_vb2_buf_cleanup
,
1055 .queue_setup
= cio2_vb2_queue_setup
,
1056 .start_streaming
= cio2_vb2_start_streaming
,
1057 .stop_streaming
= cio2_vb2_stop_streaming
,
1058 .wait_prepare
= vb2_ops_wait_prepare
,
1059 .wait_finish
= vb2_ops_wait_finish
,
1062 /**************** V4L2 interface ****************/
1064 static int cio2_v4l2_querycap(struct file
*file
, void *fh
,
1065 struct v4l2_capability
*cap
)
1067 struct cio2_device
*cio2
= video_drvdata(file
);
1069 strlcpy(cap
->driver
, CIO2_NAME
, sizeof(cap
->driver
));
1070 strlcpy(cap
->card
, CIO2_DEVICE_NAME
, sizeof(cap
->card
));
1071 snprintf(cap
->bus_info
, sizeof(cap
->bus_info
),
1072 "PCI:%s", pci_name(cio2
->pci_dev
));
1077 static int cio2_v4l2_enum_fmt(struct file
*file
, void *fh
,
1078 struct v4l2_fmtdesc
*f
)
1080 if (f
->index
>= ARRAY_SIZE(formats
))
1083 f
->pixelformat
= formats
[f
->index
].fourcc
;
1088 /* The format is validated in cio2_video_link_validate() */
1089 static int cio2_v4l2_g_fmt(struct file
*file
, void *fh
, struct v4l2_format
*f
)
1091 struct cio2_queue
*q
= file_to_cio2_queue(file
);
1093 f
->fmt
.pix_mp
= q
->format
;
1098 static int cio2_v4l2_try_fmt(struct file
*file
, void *fh
, struct v4l2_format
*f
)
1100 const struct ipu3_cio2_fmt
*fmt
;
1101 struct v4l2_pix_format_mplane
*mpix
= &f
->fmt
.pix_mp
;
1103 fmt
= cio2_find_format(&mpix
->pixelformat
, NULL
);
1107 /* Only supports up to 4224x3136 */
1108 if (mpix
->width
> CIO2_IMAGE_MAX_WIDTH
)
1109 mpix
->width
= CIO2_IMAGE_MAX_WIDTH
;
1110 if (mpix
->height
> CIO2_IMAGE_MAX_LENGTH
)
1111 mpix
->height
= CIO2_IMAGE_MAX_LENGTH
;
1113 mpix
->num_planes
= 1;
1114 mpix
->pixelformat
= fmt
->fourcc
;
1115 mpix
->colorspace
= V4L2_COLORSPACE_RAW
;
1116 mpix
->field
= V4L2_FIELD_NONE
;
1117 memset(mpix
->reserved
, 0, sizeof(mpix
->reserved
));
1118 mpix
->plane_fmt
[0].bytesperline
= cio2_bytesperline(mpix
->width
);
1119 mpix
->plane_fmt
[0].sizeimage
= mpix
->plane_fmt
[0].bytesperline
*
1121 memset(mpix
->plane_fmt
[0].reserved
, 0,
1122 sizeof(mpix
->plane_fmt
[0].reserved
));
1125 mpix
->ycbcr_enc
= V4L2_YCBCR_ENC_DEFAULT
;
1126 mpix
->quantization
= V4L2_QUANTIZATION_DEFAULT
;
1127 mpix
->xfer_func
= V4L2_XFER_FUNC_DEFAULT
;
1132 static int cio2_v4l2_s_fmt(struct file
*file
, void *fh
, struct v4l2_format
*f
)
1134 struct cio2_queue
*q
= file_to_cio2_queue(file
);
1136 cio2_v4l2_try_fmt(file
, fh
, f
);
1137 q
->format
= f
->fmt
.pix_mp
;
1143 cio2_video_enum_input(struct file
*file
, void *fh
, struct v4l2_input
*input
)
1145 if (input
->index
> 0)
1148 strlcpy(input
->name
, "camera", sizeof(input
->name
));
1149 input
->type
= V4L2_INPUT_TYPE_CAMERA
;
1155 cio2_video_g_input(struct file
*file
, void *fh
, unsigned int *input
)
1163 cio2_video_s_input(struct file
*file
, void *fh
, unsigned int input
)
1165 return input
== 0 ? 0 : -EINVAL
;
1168 static const struct v4l2_file_operations cio2_v4l2_fops
= {
1169 .owner
= THIS_MODULE
,
1170 .unlocked_ioctl
= video_ioctl2
,
1171 .open
= v4l2_fh_open
,
1172 .release
= vb2_fop_release
,
1173 .poll
= vb2_fop_poll
,
1174 .mmap
= vb2_fop_mmap
,
1177 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops
= {
1178 .vidioc_querycap
= cio2_v4l2_querycap
,
1179 .vidioc_enum_fmt_vid_cap_mplane
= cio2_v4l2_enum_fmt
,
1180 .vidioc_g_fmt_vid_cap_mplane
= cio2_v4l2_g_fmt
,
1181 .vidioc_s_fmt_vid_cap_mplane
= cio2_v4l2_s_fmt
,
1182 .vidioc_try_fmt_vid_cap_mplane
= cio2_v4l2_try_fmt
,
1183 .vidioc_reqbufs
= vb2_ioctl_reqbufs
,
1184 .vidioc_create_bufs
= vb2_ioctl_create_bufs
,
1185 .vidioc_prepare_buf
= vb2_ioctl_prepare_buf
,
1186 .vidioc_querybuf
= vb2_ioctl_querybuf
,
1187 .vidioc_qbuf
= vb2_ioctl_qbuf
,
1188 .vidioc_dqbuf
= vb2_ioctl_dqbuf
,
1189 .vidioc_streamon
= vb2_ioctl_streamon
,
1190 .vidioc_streamoff
= vb2_ioctl_streamoff
,
1191 .vidioc_expbuf
= vb2_ioctl_expbuf
,
1192 .vidioc_enum_input
= cio2_video_enum_input
,
1193 .vidioc_g_input
= cio2_video_g_input
,
1194 .vidioc_s_input
= cio2_video_s_input
,
1197 static int cio2_subdev_subscribe_event(struct v4l2_subdev
*sd
,
1199 struct v4l2_event_subscription
*sub
)
1201 if (sub
->type
!= V4L2_EVENT_FRAME_SYNC
)
1204 /* Line number. For now only zero accepted. */
1208 return v4l2_event_subscribe(fh
, sub
, 0, NULL
);
1211 static int cio2_subdev_open(struct v4l2_subdev
*sd
, struct v4l2_subdev_fh
*fh
)
1213 struct v4l2_mbus_framefmt
*format
;
1214 const struct v4l2_mbus_framefmt fmt_default
= {
1217 .code
= formats
[0].mbus_code
,
1218 .field
= V4L2_FIELD_NONE
,
1219 .colorspace
= V4L2_COLORSPACE_RAW
,
1220 .ycbcr_enc
= V4L2_YCBCR_ENC_DEFAULT
,
1221 .quantization
= V4L2_QUANTIZATION_DEFAULT
,
1222 .xfer_func
= V4L2_XFER_FUNC_DEFAULT
,
1225 /* Initialize try_fmt */
1226 format
= v4l2_subdev_get_try_format(sd
, fh
->pad
, CIO2_PAD_SINK
);
1227 *format
= fmt_default
;
1230 format
= v4l2_subdev_get_try_format(sd
, fh
->pad
, CIO2_PAD_SOURCE
);
1231 *format
= fmt_default
;
1237 * cio2_subdev_get_fmt - Handle get format by pads subdev method
1238 * @sd : pointer to v4l2 subdev structure
1239 * @cfg: V4L2 subdev pad config
1240 * @fmt: pointer to v4l2 subdev format structure
1241 * return -EINVAL or zero on success
1243 static int cio2_subdev_get_fmt(struct v4l2_subdev
*sd
,
1244 struct v4l2_subdev_pad_config
*cfg
,
1245 struct v4l2_subdev_format
*fmt
)
1247 struct cio2_queue
*q
= container_of(sd
, struct cio2_queue
, subdev
);
1248 struct v4l2_subdev_format format
;
1251 if (fmt
->which
== V4L2_SUBDEV_FORMAT_TRY
) {
1252 fmt
->format
= *v4l2_subdev_get_try_format(sd
, cfg
, fmt
->pad
);
1256 if (fmt
->pad
== CIO2_PAD_SINK
) {
1257 format
.which
= V4L2_SUBDEV_FORMAT_ACTIVE
;
1258 ret
= v4l2_subdev_call(sd
, pad
, get_fmt
, NULL
,
1263 /* update colorspace etc */
1264 q
->subdev_fmt
.colorspace
= format
.format
.colorspace
;
1265 q
->subdev_fmt
.ycbcr_enc
= format
.format
.ycbcr_enc
;
1266 q
->subdev_fmt
.quantization
= format
.format
.quantization
;
1267 q
->subdev_fmt
.xfer_func
= format
.format
.xfer_func
;
1270 fmt
->format
= q
->subdev_fmt
;
1276 * cio2_subdev_set_fmt - Handle set format by pads subdev method
1277 * @sd : pointer to v4l2 subdev structure
1278 * @cfg: V4L2 subdev pad config
1279 * @fmt: pointer to v4l2 subdev format structure
1280 * return -EINVAL or zero on success
1282 static int cio2_subdev_set_fmt(struct v4l2_subdev
*sd
,
1283 struct v4l2_subdev_pad_config
*cfg
,
1284 struct v4l2_subdev_format
*fmt
)
1286 struct cio2_queue
*q
= container_of(sd
, struct cio2_queue
, subdev
);
1289 * Only allow setting sink pad format;
1290 * source always propagates from sink
1292 if (fmt
->pad
== CIO2_PAD_SOURCE
)
1293 return cio2_subdev_get_fmt(sd
, cfg
, fmt
);
1295 if (fmt
->which
== V4L2_SUBDEV_FORMAT_TRY
) {
1296 *v4l2_subdev_get_try_format(sd
, cfg
, fmt
->pad
) = fmt
->format
;
1298 /* It's the sink, allow changing frame size */
1299 q
->subdev_fmt
.width
= fmt
->format
.width
;
1300 q
->subdev_fmt
.height
= fmt
->format
.height
;
1301 q
->subdev_fmt
.code
= fmt
->format
.code
;
1302 fmt
->format
= q
->subdev_fmt
;
1308 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev
*sd
,
1309 struct v4l2_subdev_pad_config
*cfg
,
1310 struct v4l2_subdev_mbus_code_enum
*code
)
1312 if (code
->index
>= ARRAY_SIZE(formats
))
1315 code
->code
= formats
[code
->index
].mbus_code
;
1319 static int cio2_subdev_link_validate_get_format(struct media_pad
*pad
,
1320 struct v4l2_subdev_format
*fmt
)
1322 if (is_media_entity_v4l2_subdev(pad
->entity
)) {
1323 struct v4l2_subdev
*sd
=
1324 media_entity_to_v4l2_subdev(pad
->entity
);
1326 fmt
->which
= V4L2_SUBDEV_FORMAT_ACTIVE
;
1327 fmt
->pad
= pad
->index
;
1328 return v4l2_subdev_call(sd
, pad
, get_fmt
, NULL
, fmt
);
1334 static int cio2_video_link_validate(struct media_link
*link
)
1336 struct video_device
*vd
= container_of(link
->sink
->entity
,
1337 struct video_device
, entity
);
1338 struct cio2_queue
*q
= container_of(vd
, struct cio2_queue
, vdev
);
1339 struct cio2_device
*cio2
= video_get_drvdata(vd
);
1340 struct v4l2_subdev_format source_fmt
;
1343 if (!media_entity_remote_pad(link
->sink
->entity
->pads
)) {
1344 dev_info(&cio2
->pci_dev
->dev
,
1345 "video node %s pad not connected\n", vd
->name
);
1349 ret
= cio2_subdev_link_validate_get_format(link
->source
, &source_fmt
);
1353 if (source_fmt
.format
.width
!= q
->format
.width
||
1354 source_fmt
.format
.height
!= q
->format
.height
) {
1355 dev_err(&cio2
->pci_dev
->dev
,
1356 "Wrong width or height %ux%u (%ux%u expected)\n",
1357 q
->format
.width
, q
->format
.height
,
1358 source_fmt
.format
.width
, source_fmt
.format
.height
);
1362 if (!cio2_find_format(&q
->format
.pixelformat
, &source_fmt
.format
.code
))
1368 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops
= {
1369 .subscribe_event
= cio2_subdev_subscribe_event
,
1370 .unsubscribe_event
= v4l2_event_subdev_unsubscribe
,
1373 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops
= {
1374 .open
= cio2_subdev_open
,
1377 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops
= {
1378 .link_validate
= v4l2_subdev_link_validate_default
,
1379 .get_fmt
= cio2_subdev_get_fmt
,
1380 .set_fmt
= cio2_subdev_set_fmt
,
1381 .enum_mbus_code
= cio2_subdev_enum_mbus_code
,
1384 static const struct v4l2_subdev_ops cio2_subdev_ops
= {
1385 .core
= &cio2_subdev_core_ops
,
1386 .pad
= &cio2_subdev_pad_ops
,
1389 /******* V4L2 sub-device asynchronous registration callbacks***********/
1391 struct sensor_async_subdev
{
1392 struct v4l2_async_subdev asd
;
1393 struct csi2_bus_info csi2
;
1396 /* The .bound() notifier callback when a match is found */
1397 static int cio2_notifier_bound(struct v4l2_async_notifier
*notifier
,
1398 struct v4l2_subdev
*sd
,
1399 struct v4l2_async_subdev
*asd
)
1401 struct cio2_device
*cio2
= container_of(notifier
,
1402 struct cio2_device
, notifier
);
1403 struct sensor_async_subdev
*s_asd
= container_of(asd
,
1404 struct sensor_async_subdev
, asd
);
1405 struct cio2_queue
*q
;
1407 if (cio2
->queue
[s_asd
->csi2
.port
].sensor
)
1410 q
= &cio2
->queue
[s_asd
->csi2
.port
];
1412 q
->csi2
= s_asd
->csi2
;
1414 q
->csi_rx_base
= cio2
->base
+ CIO2_REG_PIPE_BASE(q
->csi2
.port
);
1419 /* The .unbind callback */
1420 static void cio2_notifier_unbind(struct v4l2_async_notifier
*notifier
,
1421 struct v4l2_subdev
*sd
,
1422 struct v4l2_async_subdev
*asd
)
1424 struct cio2_device
*cio2
= container_of(notifier
,
1425 struct cio2_device
, notifier
);
1426 struct sensor_async_subdev
*s_asd
= container_of(asd
,
1427 struct sensor_async_subdev
, asd
);
1429 cio2
->queue
[s_asd
->csi2
.port
].sensor
= NULL
;
1432 /* .complete() is called after all subdevices have been located */
1433 static int cio2_notifier_complete(struct v4l2_async_notifier
*notifier
)
1435 struct cio2_device
*cio2
= container_of(notifier
, struct cio2_device
,
1437 struct sensor_async_subdev
*s_asd
;
1438 struct cio2_queue
*q
;
1439 unsigned int i
, pad
;
1442 for (i
= 0; i
< notifier
->num_subdevs
; i
++) {
1443 s_asd
= container_of(cio2
->notifier
.subdevs
[i
],
1444 struct sensor_async_subdev
, asd
);
1445 q
= &cio2
->queue
[s_asd
->csi2
.port
];
1447 for (pad
= 0; pad
< q
->sensor
->entity
.num_pads
; pad
++)
1448 if (q
->sensor
->entity
.pads
[pad
].flags
&
1449 MEDIA_PAD_FL_SOURCE
)
1452 if (pad
== q
->sensor
->entity
.num_pads
) {
1453 dev_err(&cio2
->pci_dev
->dev
,
1454 "failed to find src pad for %s\n",
1459 ret
= media_create_pad_link(
1460 &q
->sensor
->entity
, pad
,
1461 &q
->subdev
.entity
, CIO2_PAD_SINK
,
1464 dev_err(&cio2
->pci_dev
->dev
,
1465 "failed to create link for %s\n",
1466 cio2
->queue
[i
].sensor
->name
);
1471 return v4l2_device_register_subdev_nodes(&cio2
->v4l2_dev
);
1474 static const struct v4l2_async_notifier_operations cio2_async_ops
= {
1475 .bound
= cio2_notifier_bound
,
1476 .unbind
= cio2_notifier_unbind
,
1477 .complete
= cio2_notifier_complete
,
1480 static int cio2_fwnode_parse(struct device
*dev
,
1481 struct v4l2_fwnode_endpoint
*vep
,
1482 struct v4l2_async_subdev
*asd
)
1484 struct sensor_async_subdev
*s_asd
=
1485 container_of(asd
, struct sensor_async_subdev
, asd
);
1487 if (vep
->bus_type
!= V4L2_MBUS_CSI2
) {
1488 dev_err(dev
, "Only CSI2 bus type is currently supported\n");
1492 s_asd
->csi2
.port
= vep
->base
.port
;
1493 s_asd
->csi2
.lanes
= vep
->bus
.mipi_csi2
.num_data_lanes
;
1498 static int cio2_notifier_init(struct cio2_device
*cio2
)
1502 ret
= v4l2_async_notifier_parse_fwnode_endpoints(
1503 &cio2
->pci_dev
->dev
, &cio2
->notifier
,
1504 sizeof(struct sensor_async_subdev
),
1509 if (!cio2
->notifier
.num_subdevs
)
1510 return -ENODEV
; /* no endpoint */
1512 cio2
->notifier
.ops
= &cio2_async_ops
;
1513 ret
= v4l2_async_notifier_register(&cio2
->v4l2_dev
, &cio2
->notifier
);
1515 dev_err(&cio2
->pci_dev
->dev
,
1516 "failed to register async notifier : %d\n", ret
);
1517 v4l2_async_notifier_cleanup(&cio2
->notifier
);
1523 static void cio2_notifier_exit(struct cio2_device
*cio2
)
1525 v4l2_async_notifier_unregister(&cio2
->notifier
);
1526 v4l2_async_notifier_cleanup(&cio2
->notifier
);
1529 /**************** Queue initialization ****************/
1530 static const struct media_entity_operations cio2_media_ops
= {
1531 .link_validate
= v4l2_subdev_link_validate
,
1534 static const struct media_entity_operations cio2_video_entity_ops
= {
1535 .link_validate
= cio2_video_link_validate
,
1538 static int cio2_queue_init(struct cio2_device
*cio2
, struct cio2_queue
*q
)
1540 static const u32 default_width
= 1936;
1541 static const u32 default_height
= 1096;
1542 const struct ipu3_cio2_fmt dflt_fmt
= formats
[0];
1544 struct video_device
*vdev
= &q
->vdev
;
1545 struct vb2_queue
*vbq
= &q
->vbq
;
1546 struct v4l2_subdev
*subdev
= &q
->subdev
;
1547 struct v4l2_mbus_framefmt
*fmt
;
1550 /* Initialize miscellaneous variables */
1551 mutex_init(&q
->lock
);
1553 /* Initialize formats to default values */
1554 fmt
= &q
->subdev_fmt
;
1555 fmt
->width
= default_width
;
1556 fmt
->height
= default_height
;
1557 fmt
->code
= dflt_fmt
.mbus_code
;
1558 fmt
->field
= V4L2_FIELD_NONE
;
1560 q
->format
.width
= default_width
;
1561 q
->format
.height
= default_height
;
1562 q
->format
.pixelformat
= dflt_fmt
.fourcc
;
1563 q
->format
.colorspace
= V4L2_COLORSPACE_RAW
;
1564 q
->format
.field
= V4L2_FIELD_NONE
;
1565 q
->format
.num_planes
= 1;
1566 q
->format
.plane_fmt
[0].bytesperline
=
1567 cio2_bytesperline(q
->format
.width
);
1568 q
->format
.plane_fmt
[0].sizeimage
= q
->format
.plane_fmt
[0].bytesperline
*
1571 /* Initialize fbpt */
1572 r
= cio2_fbpt_init(cio2
, q
);
1576 /* Initialize media entities */
1577 q
->subdev_pads
[CIO2_PAD_SINK
].flags
= MEDIA_PAD_FL_SINK
|
1578 MEDIA_PAD_FL_MUST_CONNECT
;
1579 q
->subdev_pads
[CIO2_PAD_SOURCE
].flags
= MEDIA_PAD_FL_SOURCE
;
1580 subdev
->entity
.ops
= &cio2_media_ops
;
1581 subdev
->internal_ops
= &cio2_subdev_internal_ops
;
1582 r
= media_entity_pads_init(&subdev
->entity
, CIO2_PADS
, q
->subdev_pads
);
1584 dev_err(&cio2
->pci_dev
->dev
,
1585 "failed initialize subdev media entity (%d)\n", r
);
1586 goto fail_subdev_media_entity
;
1589 q
->vdev_pad
.flags
= MEDIA_PAD_FL_SINK
| MEDIA_PAD_FL_MUST_CONNECT
;
1590 vdev
->entity
.ops
= &cio2_video_entity_ops
;
1591 r
= media_entity_pads_init(&vdev
->entity
, 1, &q
->vdev_pad
);
1593 dev_err(&cio2
->pci_dev
->dev
,
1594 "failed initialize videodev media entity (%d)\n", r
);
1595 goto fail_vdev_media_entity
;
1598 /* Initialize subdev */
1599 v4l2_subdev_init(subdev
, &cio2_subdev_ops
);
1600 subdev
->flags
= V4L2_SUBDEV_FL_HAS_DEVNODE
| V4L2_SUBDEV_FL_HAS_EVENTS
;
1601 subdev
->owner
= THIS_MODULE
;
1602 snprintf(subdev
->name
, sizeof(subdev
->name
),
1603 CIO2_ENTITY_NAME
" %td", q
- cio2
->queue
);
1604 v4l2_set_subdevdata(subdev
, cio2
);
1605 r
= v4l2_device_register_subdev(&cio2
->v4l2_dev
, subdev
);
1607 dev_err(&cio2
->pci_dev
->dev
,
1608 "failed initialize subdev (%d)\n", r
);
1612 /* Initialize vbq */
1613 vbq
->type
= V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
;
1614 vbq
->io_modes
= VB2_USERPTR
| VB2_MMAP
| VB2_DMABUF
;
1615 vbq
->ops
= &cio2_vb2_ops
;
1616 vbq
->mem_ops
= &vb2_dma_sg_memops
;
1617 vbq
->buf_struct_size
= sizeof(struct cio2_buffer
);
1618 vbq
->timestamp_flags
= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
;
1619 vbq
->min_buffers_needed
= 1;
1620 vbq
->drv_priv
= cio2
;
1621 vbq
->lock
= &q
->lock
;
1622 r
= vb2_queue_init(vbq
);
1624 dev_err(&cio2
->pci_dev
->dev
,
1625 "failed to initialize videobuf2 queue (%d)\n", r
);
1629 /* Initialize vdev */
1630 snprintf(vdev
->name
, sizeof(vdev
->name
),
1631 "%s %td", CIO2_NAME
, q
- cio2
->queue
);
1632 vdev
->release
= video_device_release_empty
;
1633 vdev
->fops
= &cio2_v4l2_fops
;
1634 vdev
->ioctl_ops
= &cio2_v4l2_ioctl_ops
;
1635 vdev
->lock
= &cio2
->lock
;
1636 vdev
->v4l2_dev
= &cio2
->v4l2_dev
;
1637 vdev
->queue
= &q
->vbq
;
1638 vdev
->device_caps
= V4L2_CAP_VIDEO_CAPTURE_MPLANE
| V4L2_CAP_STREAMING
;
1639 video_set_drvdata(vdev
, cio2
);
1640 r
= video_register_device(vdev
, VFL_TYPE_GRABBER
, -1);
1642 dev_err(&cio2
->pci_dev
->dev
,
1643 "failed to register video device (%d)\n", r
);
1647 /* Create link from CIO2 subdev to output node */
1648 r
= media_create_pad_link(
1649 &subdev
->entity
, CIO2_PAD_SOURCE
, &vdev
->entity
, 0,
1650 MEDIA_LNK_FL_ENABLED
| MEDIA_LNK_FL_IMMUTABLE
);
1657 video_unregister_device(&q
->vdev
);
1659 vb2_queue_release(vbq
);
1661 v4l2_device_unregister_subdev(subdev
);
1663 media_entity_cleanup(&vdev
->entity
);
1664 fail_vdev_media_entity
:
1665 media_entity_cleanup(&subdev
->entity
);
1666 fail_subdev_media_entity
:
1667 cio2_fbpt_exit(q
, &cio2
->pci_dev
->dev
);
1669 mutex_destroy(&q
->lock
);
1674 static void cio2_queue_exit(struct cio2_device
*cio2
, struct cio2_queue
*q
)
1676 video_unregister_device(&q
->vdev
);
1677 media_entity_cleanup(&q
->vdev
.entity
);
1678 vb2_queue_release(&q
->vbq
);
1679 v4l2_device_unregister_subdev(&q
->subdev
);
1680 media_entity_cleanup(&q
->subdev
.entity
);
1681 cio2_fbpt_exit(q
, &cio2
->pci_dev
->dev
);
1682 mutex_destroy(&q
->lock
);
1685 static int cio2_queues_init(struct cio2_device
*cio2
)
1689 for (i
= 0; i
< CIO2_QUEUES
; i
++) {
1690 r
= cio2_queue_init(cio2
, &cio2
->queue
[i
]);
1695 if (i
== CIO2_QUEUES
)
1698 for (i
--; i
>= 0; i
--)
1699 cio2_queue_exit(cio2
, &cio2
->queue
[i
]);
1704 static void cio2_queues_exit(struct cio2_device
*cio2
)
1708 for (i
= 0; i
< CIO2_QUEUES
; i
++)
1709 cio2_queue_exit(cio2
, &cio2
->queue
[i
]);
1712 /**************** PCI interface ****************/
1714 static int cio2_pci_config_setup(struct pci_dev
*dev
)
1717 int r
= pci_enable_msi(dev
);
1720 dev_err(&dev
->dev
, "failed to enable MSI (%d)\n", r
);
1724 pci_read_config_word(dev
, PCI_COMMAND
, &pci_command
);
1725 pci_command
|= PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER
|
1726 PCI_COMMAND_INTX_DISABLE
;
1727 pci_write_config_word(dev
, PCI_COMMAND
, pci_command
);
1732 static int cio2_pci_probe(struct pci_dev
*pci_dev
,
1733 const struct pci_device_id
*id
)
1735 struct cio2_device
*cio2
;
1736 void __iomem
*const *iomap
;
1739 cio2
= devm_kzalloc(&pci_dev
->dev
, sizeof(*cio2
), GFP_KERNEL
);
1742 cio2
->pci_dev
= pci_dev
;
1744 r
= pcim_enable_device(pci_dev
);
1746 dev_err(&pci_dev
->dev
, "failed to enable device (%d)\n", r
);
1750 dev_info(&pci_dev
->dev
, "device 0x%x (rev: 0x%x)\n",
1751 pci_dev
->device
, pci_dev
->revision
);
1753 r
= pcim_iomap_regions(pci_dev
, 1 << CIO2_PCI_BAR
, pci_name(pci_dev
));
1755 dev_err(&pci_dev
->dev
, "failed to remap I/O memory (%d)\n", r
);
1759 iomap
= pcim_iomap_table(pci_dev
);
1761 dev_err(&pci_dev
->dev
, "failed to iomap table\n");
1765 cio2
->base
= iomap
[CIO2_PCI_BAR
];
1767 pci_set_drvdata(pci_dev
, cio2
);
1769 pci_set_master(pci_dev
);
1771 r
= pci_set_dma_mask(pci_dev
, CIO2_DMA_MASK
);
1773 dev_err(&pci_dev
->dev
, "failed to set DMA mask (%d)\n", r
);
1777 r
= cio2_pci_config_setup(pci_dev
);
1781 r
= cio2_fbpt_init_dummy(cio2
);
1785 mutex_init(&cio2
->lock
);
1787 cio2
->media_dev
.dev
= &cio2
->pci_dev
->dev
;
1788 strlcpy(cio2
->media_dev
.model
, CIO2_DEVICE_NAME
,
1789 sizeof(cio2
->media_dev
.model
));
1790 snprintf(cio2
->media_dev
.bus_info
, sizeof(cio2
->media_dev
.bus_info
),
1791 "PCI:%s", pci_name(cio2
->pci_dev
));
1792 cio2
->media_dev
.hw_revision
= 0;
1794 media_device_init(&cio2
->media_dev
);
1795 r
= media_device_register(&cio2
->media_dev
);
1797 goto fail_mutex_destroy
;
1799 cio2
->v4l2_dev
.mdev
= &cio2
->media_dev
;
1800 r
= v4l2_device_register(&pci_dev
->dev
, &cio2
->v4l2_dev
);
1802 dev_err(&pci_dev
->dev
,
1803 "failed to register V4L2 device (%d)\n", r
);
1804 goto fail_media_device_unregister
;
1807 r
= cio2_queues_init(cio2
);
1809 goto fail_v4l2_device_unregister
;
1811 /* Register notifier for subdevices we care */
1812 r
= cio2_notifier_init(cio2
);
1814 goto fail_cio2_queue_exit
;
1816 r
= devm_request_irq(&pci_dev
->dev
, pci_dev
->irq
, cio2_irq
,
1817 IRQF_SHARED
, CIO2_NAME
, cio2
);
1819 dev_err(&pci_dev
->dev
, "failed to request IRQ (%d)\n", r
);
1823 pm_runtime_put_noidle(&pci_dev
->dev
);
1824 pm_runtime_allow(&pci_dev
->dev
);
1829 cio2_notifier_exit(cio2
);
1830 fail_cio2_queue_exit
:
1831 cio2_queues_exit(cio2
);
1832 fail_v4l2_device_unregister
:
1833 v4l2_device_unregister(&cio2
->v4l2_dev
);
1834 fail_media_device_unregister
:
1835 media_device_unregister(&cio2
->media_dev
);
1836 media_device_cleanup(&cio2
->media_dev
);
1838 mutex_destroy(&cio2
->lock
);
1839 cio2_fbpt_exit_dummy(cio2
);
1844 static void cio2_pci_remove(struct pci_dev
*pci_dev
)
1846 struct cio2_device
*cio2
= pci_get_drvdata(pci_dev
);
1849 media_device_unregister(&cio2
->media_dev
);
1850 cio2_notifier_exit(cio2
);
1851 for (i
= 0; i
< CIO2_QUEUES
; i
++)
1852 cio2_queue_exit(cio2
, &cio2
->queue
[i
]);
1853 cio2_fbpt_exit_dummy(cio2
);
1854 v4l2_device_unregister(&cio2
->v4l2_dev
);
1855 media_device_cleanup(&cio2
->media_dev
);
1856 mutex_destroy(&cio2
->lock
);
1859 static int __maybe_unused
cio2_runtime_suspend(struct device
*dev
)
1861 struct pci_dev
*pci_dev
= to_pci_dev(dev
);
1862 struct cio2_device
*cio2
= pci_get_drvdata(pci_dev
);
1863 void __iomem
*const base
= cio2
->base
;
1866 writel(CIO2_D0I3C_I3
, base
+ CIO2_REG_D0I3C
);
1867 dev_dbg(dev
, "cio2 runtime suspend.\n");
1869 pci_read_config_word(pci_dev
, pci_dev
->pm_cap
+ CIO2_PMCSR_OFFSET
, &pm
);
1870 pm
= (pm
>> CIO2_PMCSR_D0D3_SHIFT
) << CIO2_PMCSR_D0D3_SHIFT
;
1871 pm
|= CIO2_PMCSR_D3
;
1872 pci_write_config_word(pci_dev
, pci_dev
->pm_cap
+ CIO2_PMCSR_OFFSET
, pm
);
1877 static int __maybe_unused
cio2_runtime_resume(struct device
*dev
)
1879 struct pci_dev
*pci_dev
= to_pci_dev(dev
);
1880 struct cio2_device
*cio2
= pci_get_drvdata(pci_dev
);
1881 void __iomem
*const base
= cio2
->base
;
1884 writel(CIO2_D0I3C_RR
, base
+ CIO2_REG_D0I3C
);
1885 dev_dbg(dev
, "cio2 runtime resume.\n");
1887 pci_read_config_word(pci_dev
, pci_dev
->pm_cap
+ CIO2_PMCSR_OFFSET
, &pm
);
1888 pm
= (pm
>> CIO2_PMCSR_D0D3_SHIFT
) << CIO2_PMCSR_D0D3_SHIFT
;
1889 pci_write_config_word(pci_dev
, pci_dev
->pm_cap
+ CIO2_PMCSR_OFFSET
, pm
);
1895 * Helper function to advance all the elements of a circular buffer by "start"
1898 static void arrange(void *ptr
, size_t elem_size
, size_t elems
, size_t start
)
1904 { start
, elems
- 1 },
1907 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1909 /* Loop as long as we have out-of-place entries */
1910 while (CHUNK_SIZE(&arr
[0]) && CHUNK_SIZE(&arr
[1])) {
1914 * Find the number of entries that can be arranged on this
1917 size0
= min(CHUNK_SIZE(&arr
[0]), CHUNK_SIZE(&arr
[1]));
1919 /* Swap the entries in two parts of the array. */
1920 for (i
= 0; i
< size0
; i
++) {
1921 u8
*d
= ptr
+ elem_size
* (arr
[1].begin
+ i
);
1922 u8
*s
= ptr
+ elem_size
* (arr
[0].begin
+ i
);
1925 for (j
= 0; j
< elem_size
; j
++)
1929 if (CHUNK_SIZE(&arr
[0]) > CHUNK_SIZE(&arr
[1])) {
1930 /* The end of the first array remains unarranged. */
1931 arr
[0].begin
+= size0
;
1934 * The first array is fully arranged so we proceed
1935 * handling the next one.
1937 arr
[0].begin
= arr
[1].begin
;
1938 arr
[0].end
= arr
[1].begin
+ size0
- 1;
1939 arr
[1].begin
+= size0
;
1944 static void cio2_fbpt_rearrange(struct cio2_device
*cio2
, struct cio2_queue
*q
)
1948 for (i
= 0, j
= q
->bufs_first
; i
< CIO2_MAX_BUFFERS
;
1949 i
++, j
= (j
+ 1) % CIO2_MAX_BUFFERS
)
1953 if (i
== CIO2_MAX_BUFFERS
)
1957 arrange(q
->fbpt
, sizeof(struct cio2_fbpt_entry
) * CIO2_MAX_LOPS
,
1958 CIO2_MAX_BUFFERS
, j
);
1959 arrange(q
->bufs
, sizeof(struct cio2_buffer
*),
1960 CIO2_MAX_BUFFERS
, j
);
1964 * DMA clears the valid bit when accessing the buffer.
1965 * When stopping stream in suspend callback, some of the buffers
1966 * may be in invalid state. After resume, when DMA meets the invalid
1967 * buffer, it will halt and stop receiving new data.
1968 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1970 for (i
= 0; i
< CIO2_MAX_BUFFERS
; i
++)
1971 cio2_fbpt_entry_enable(cio2
, q
->fbpt
+ i
* CIO2_MAX_LOPS
);
1974 static int __maybe_unused
cio2_suspend(struct device
*dev
)
1976 struct pci_dev
*pci_dev
= to_pci_dev(dev
);
1977 struct cio2_device
*cio2
= pci_get_drvdata(pci_dev
);
1978 struct cio2_queue
*q
= cio2
->cur_queue
;
1980 dev_dbg(dev
, "cio2 suspend\n");
1981 if (!cio2
->streaming
)
1985 cio2_hw_exit(cio2
, q
);
1986 synchronize_irq(pci_dev
->irq
);
1988 pm_runtime_force_suspend(dev
);
1991 * Upon resume, hw starts to process the fbpt entries from beginning,
1992 * so relocate the queued buffs to the fbpt head before suspend.
1994 cio2_fbpt_rearrange(cio2
, q
);
2001 static int __maybe_unused
cio2_resume(struct device
*dev
)
2003 struct pci_dev
*pci_dev
= to_pci_dev(dev
);
2004 struct cio2_device
*cio2
= pci_get_drvdata(pci_dev
);
2006 struct cio2_queue
*q
= cio2
->cur_queue
;
2008 dev_dbg(dev
, "cio2 resume\n");
2009 if (!cio2
->streaming
)
2012 r
= pm_runtime_force_resume(&cio2
->pci_dev
->dev
);
2014 dev_err(&cio2
->pci_dev
->dev
,
2015 "failed to set power %d\n", r
);
2019 r
= cio2_hw_init(cio2
, q
);
2021 dev_err(dev
, "fail to init cio2 hw\n");
static const struct dev_pm_ops cio2_pm_ops = {
	SET_RUNTIME_PM_OPS(cio2_runtime_suspend, cio2_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(cio2_suspend, cio2_resume)
};

static const struct pci_device_id cio2_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
	{ }
};

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);

static struct pci_driver cio2_pci_driver = {
	.name		= CIO2_NAME,
	.id_table	= cio2_pci_id_table,
	.probe		= cio2_pci_probe,
	.remove		= cio2_pci_remove,
	.driver		= {
		.pm = &cio2_pm_ops,
	},
};

module_pci_driver(cio2_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");