// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017,2020 Intel Corporation
 *
 * Based partially on Intel IPU4 driver written by
 *  Sakari Ailus <sakari.ailus@linux.intel.com>
 *  Jouni Högander <jouni.hogander@intel.com>
 *  Antti Laakso <antti.laakso@intel.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>

#include <media/ipu-bridge.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>

#include "ipu3-cio2.h"
struct ipu3_cio2_fmt {
	u32 mbus_code;
	u32 fourcc;
	u8 mipicode;
	u8 bpp;
};

/*
 * These are raw formats used in Intel's third generation of
 * Image Processing Unit known as IPU3.
 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
 * last LSB 6 bits unused.
 */
static const struct ipu3_cio2_fmt formats[] = {
	{ /* put default entry at beginning */
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_Y10,
	},
};
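
/*
 * A quick sanity check of the packing described above (illustrative only):
 * 25 pixels * 10 bits = 250 bits = 31.25 bytes, stored as a 32-byte block,
 * which leaves exactly the 6 unused LSB bits mentioned in the comment.
 */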
/**
 * cio2_find_format - lookup color format by fourcc or/and media bus code
 * @pixelformat: fourcc to match, ignored if null
 * @mbus_code: media bus code to match, ignored if null
 */
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
						    const u32 *mbus_code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (pixelformat && *pixelformat != formats[i].fourcc)
			continue;
		if (mbus_code && *mbus_code != formats[i].mbus_code)
			continue;

		return &formats[i];
	}

	return NULL;
}

static inline u32 cio2_bytesperline(const unsigned int width)
{
	/*
	 * 64 bytes for every 50 pixels, the line length
	 * in bytes is multiple of 64 (line end alignment).
	 */
	return DIV_ROUND_UP(width, 50) * 64;
}
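
/*
 * Worked example for the line stride above (a minimal sketch, assuming the
 * default 1936-pixel width used later in cio2_queue_init):
 * DIV_ROUND_UP(1936, 50) * 64 = 39 * 64 = 2496 bytes per line, i.e. the
 * packed 10-bit data rounded up to the next 64-byte boundary.
 */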
/**************** FBPT operations ****************/

static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;

	if (cio2->dummy_lop) {
		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
				  cio2->dummy_lop_bus_addr);
		cio2->dummy_lop = NULL;
	}
	if (cio2->dummy_page) {
		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
				  cio2->dummy_page_bus_addr);
		cio2->dummy_page = NULL;
	}
}
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;
	unsigned int i;

	cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
					      &cio2->dummy_page_bus_addr,
					      GFP_KERNEL);
	cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
					     &cio2->dummy_lop_bus_addr,
					     GFP_KERNEL);
	if (!cio2->dummy_page || !cio2->dummy_lop) {
		cio2_fbpt_exit_dummy(cio2);
		return -ENOMEM;
	}
	/*
	 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
	 * Initialize each entry to dummy_page bus base address.
	 */
	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);

	return 0;
}
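
/*
 * Sketch of the LOP arithmetic (illustrative, assuming 4 KiB pages): one LOP
 * page holds CIO2_LOP_ENTRIES = 1024 32-bit page frame numbers, so a single
 * LOP can map 1024 * 4 KiB = 4 MiB of buffer. The dummy LOP simply points
 * every entry at the same dummy page so stray DMA accesses land somewhere
 * harmless.
 */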
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	/*
	 * The CPU first initializes some fields in fbpt, then sets
	 * the VALID bit, this barrier is to ensure that the DMA(device)
	 * does not see the VALID bit enabled before other fields are
	 * initialized; otherwise it could lead to havoc.
	 */
	dma_wmb();

	/*
	 * Request interrupts for start and completion
	 * Valid bit is applicable only to 1st entry
	 */
	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}

/* Initialize FBPT entries to point to dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
				       struct cio2_fbpt_entry
				       entry[CIO2_MAX_LOPS])
{
	unsigned int i;

	entry[0].first_entry.first_page_offset = 0;
	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;

	for (i = 0; i < CIO2_MAX_LOPS; i++)
		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);

	cio2_fbpt_entry_enable(cio2, entry);
}
/* Initialize FBPT entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
				     struct cio2_buffer *b,
				     struct cio2_fbpt_entry
				     entry[CIO2_MAX_LOPS])
{
	struct vb2_buffer *vb = &b->vbb.vb2_buf;
	unsigned int length = vb->planes[0].length;
	int remaining, i;

	entry[0].first_entry.first_page_offset = b->offset;
	remaining = length + entry[0].first_entry.first_page_offset;
	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
	/*
	 * last_page_available_bytes has the offset of the last byte in the
	 * last page which is still accessible by DMA. DMA cannot access
	 * beyond this point. Valid range for this is from 0 to 4095.
	 * 0 indicates 1st byte in the page is DMA accessible.
	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
	 * is available for DMA transfer.
	 */
	remaining = offset_in_page(remaining) ?: PAGE_SIZE;
	entry[1].second_entry.last_page_available_bytes = remaining - 1;

	/* Point consecutive FBPT entries at the buffer's LOP pages */
	remaining = length;
	i = 0;
	while (remaining > 0) {
		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
		entry++;
		i++;
	}

	/*
	 * The first not meaningful FBPT entry should point to a valid LOP
	 */
	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);

	cio2_fbpt_entry_enable(cio2, entry);
}
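
/*
 * Worked example for the entry fields above (illustrative, assuming 4 KiB
 * pages, offset 0 and the default 2496 * 1096 = 2735616-byte plane used
 * later in cio2_queue_init): num_of_pages = PFN_UP(2735616) = 668 and
 * offset_in_page(2735616) = 3584, so last_page_available_bytes = 3583,
 * i.e. DMA may touch bytes 0..3583 of the final page.
 */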
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;

	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
				     GFP_KERNEL);
	if (!q->fbpt)
		return -ENOMEM;

	return 0;
}

static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}
/**************** CSI2 hardware setup ****************/

/*
 * The CSI2 receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *     register value = (A/1e9 + B * UI) / COUNT_ACC
 *     UI = 1 / (2 * F) in seconds
 *     COUNT_ACC = counter accuracy in seconds
 *     For IPU3 COUNT_ACC = 0.0625
 *
 * A and B are coefficients from the table below,
 * depending whether the register minimum or maximum value is
 * calculated.
 *
 *                                      Minimum      Maximum
 *     Register                          A     B      A     B
 *     reg_rx_csi_dly_cnt_termen_clane   0     0     38     0
 *     reg_rx_csi_dly_cnt_settle_clane  95    -8    300   -16
 *     reg_rx_csi_dly_cnt_termen_dlane0  0     0     35     4
 *     reg_rx_csi_dly_cnt_settle_dlane0 85    -2    145    -6
 *     reg_rx_csi_dly_cnt_termen_dlane1  0     0     35     4
 *     reg_rx_csi_dly_cnt_settle_dlane1 85    -2    145    -6
 *     reg_rx_csi_dly_cnt_termen_dlane2  0     0     35     4
 *     reg_rx_csi_dly_cnt_settle_dlane2 85    -2    145    -6
 *     reg_rx_csi_dly_cnt_termen_dlane3  0     0     35     4
 *     reg_rx_csi_dly_cnt_settle_dlane3 85    -2    145    -6
 *
 * We use the minimum values of both A and B.
 */

/*
 * shift for keeping value range suitable for 32-bit integer arithmetic
 */
#define LIMIT_SHIFT	8
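
/*
 * A worked instance of the formula above (illustrative, assuming a 400 MHz
 * link frequency and taking COUNT_ACC as 0.0625 ns): UI = 1 / (2 * 400e6) s
 * = 1.25 ns, so the clock-lane settle minimum (A = 95, B = -8) becomes
 * (95 - 8 * 1.25) / 0.0625 = 85 / 0.0625 = 1360, matching the integer
 * arithmetic performed by cio2_rx_timing() below.
 */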
static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
	const u32 accinv = 16; /* invert of counter resolution */
	const u32 uiinv = 500000000; /* 1e9 / 2 */
	s32 r;

	freq >>= LIMIT_SHIFT;

	if (WARN_ON(freq <= 0 || freq > S32_MAX))
		return def;
	/*
	 * b could be 0, -2 or -8, so |accinv * b| is always
	 * less than (1 << ds) and thus |r| < 500000000.
	 */
	r = accinv * b * (uiinv >> LIMIT_SHIFT);
	r = r / (s32)freq;
	/* max value of a is 95 */
	r += accinv * a;

	return r;
}
/* Calculate the delay value for termination enable of clock lane HS Rx */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
				 struct cio2_csi2_timing *timing,
				 unsigned int bpp, unsigned int lanes)
{
	struct device *dev = &cio2->pci_dev->dev;
	s64 freq;

	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
	if (freq < 0) {
		dev_err(dev, "error %lld, invalid link_freq\n", freq);
		return freq;
	}

	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);

	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);

	return 0;
}
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const int NUM_VCS = 4;
	static const int SID;	/* Stream id */
	static const int ENTRY;
	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
						   CIO2_FBPT_SUBENTRY_UNIT);
	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
	const struct ipu3_cio2_fmt *fmt;
	void __iomem *const base = cio2->base;
	u8 lanes, csi2bus = q->csi2.port;
	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
	struct cio2_csi2_timing timing = { 0 };
	int i, r;

	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
	if (!fmt)
		return -EINVAL;

	lanes = q->csi2.lanes;

	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
	if (r)
		return r;

	writel(timing.clk_termen, q->csi_rx_base +
	       CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
	writel(timing.clk_settle, q->csi_rx_base +
	       CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));

	for (i = 0; i < lanes; i++) {
		writel(timing.dat_termen, q->csi_rx_base +
		       CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
		writel(timing.dat_settle, q->csi_rx_base +
		       CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
	}

	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
	       CIO2_PBM_WMCTRL1_MID1_2CK |
	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
	       CIO2_PBM_ARB_CTRL_LE_EN |
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
	       base + CIO2_REG_PBM_ARB_CTRL);
	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);

	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);

	/* Configure MIPI backend */
	for (i = 0; i < NUM_VCS; i++)
		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));

	/* There are 16 short packet LUT entry */
	for (i = 0; i < 16; i++)
		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);

	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);

	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
	       base + CIO2_REG_INT_EN);

	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));

	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
	writel(CIO2_CGC_PRIM_TGE |
	       CIO2_CGC_CSI2_INTERFRAME_TGE |
	       CIO2_CGC_CSI2_PORT_DCGE |
	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL01);
	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL23);

	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
		writel(0, base + CIO2_REG_CDMABA(i));
		writel(0, base + CIO2_REG_CDMAC0(i));
		writel(0, base + CIO2_REG_CDMAC1(i));
	}

	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
	       CIO2_CDMAC0_DMA_INTR_ON_FE |
	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
	       CIO2_CDMAC0_DMA_INTR_ON_FS |
	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));

	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));

	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);

	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));

	/* Clear interrupts */
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
	writel(~0, base + CIO2_REG_INT_STS);

	/* Enable devices, starting from the last device in the pipe */
	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);

	return 0;
}
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;
	void __iomem *const base = cio2->base;
	unsigned int i;
	u32 value;
	int ret;

	/* Disable CSI receiver and MIPI backend devices */
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);

	/* Halt the DMA channel and wait for it to acknowledge */
	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
				 value, value & CIO2_CDMAC0_DMA_HALTED,
				 4000, 2000000);
	if (ret)
		dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
	}
}
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q = cio2->cur_queue;
	struct cio2_fbpt_entry *entry;
	u64 ns = ktime_get_ns();

	if (dma_chan >= CIO2_QUEUES) {
		dev_err(dev, "bad DMA channel %i\n", dma_chan);
		return;
	}

	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
		dev_warn(dev, "no ready buffers found on DMA channel %u\n",
			 dma_chan);
		return;
	}

	/* Find out which buffer(s) are ready */
	do {
		struct cio2_buffer *b;

		b = q->bufs[q->bufs_first];
		if (b) {
			unsigned int received = entry[1].second_entry.num_of_bytes;
			unsigned long payload =
				vb2_get_plane_payload(&b->vbb.vb2_buf, 0);

			q->bufs[q->bufs_first] = NULL;
			atomic_dec(&q->bufs_queued);
			dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);

			b->vbb.vb2_buf.timestamp = ns;
			b->vbb.field = V4L2_FIELD_NONE;
			b->vbb.sequence = atomic_read(&q->frame_sequence);
			if (payload != received)
				dev_warn(dev,
					 "payload length is %lu, received %u\n",
					 payload, received);
			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
		}
		atomic_inc(&q->frame_sequence);
		cio2_fbpt_entry_init_dummy(cio2, entry);
		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
}
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
	/*
	 * For the user space camera control algorithms it is essential
	 * to know when the reception of a frame has begun. That's often
	 * the best timing information to get from the hardware.
	 */
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
	};

	v4l2_event_queue(q->subdev.devnode, &event);
}
static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
};
static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
{
	unsigned long csi2_status = status;
	unsigned int i;

	for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
		dev_err(dev, "CSI-2 receiver port %i: %s\n",
			port, cio2_irq_errs[i]);

	if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
		dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
			 csi2_status, port);
}
static const char *const cio2_port_errs[] = {
	"DPHY not recoverable",
	"ECC not recoverable",
};

static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
{
	unsigned long port_status = status;
	unsigned int i;

	for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
		dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
}
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
	struct device *dev = &cio2->pci_dev->dev;
	void __iomem *const base = cio2->base;

	if (int_status & CIO2_INT_IOOE) {
		/*
		 * Interrupt on Output Error:
		 * 1) SRAM is full and FS received, or
		 * 2) An invalid bit detected by DMA.
		 */
		u32 oe_status, oe_clear;

		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
		oe_status = oe_clear;

		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
			dev_err(dev, "DMA output error: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
		}
		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
				>> CIO2_INT_EXT_OE_OES_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
		}
		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
		if (oe_status)
			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
				 oe_status);
		int_status &= ~CIO2_INT_IOOE;
	}

	if (int_status & CIO2_INT_IOC_MASK) {
		/* DMA IO done -- frame ready */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOC(d)) {
				clr |= CIO2_INT_IOC(d);
				cio2_buffer_done(cio2, d);
			}
		int_status &= ~clr;
	}

	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
		/* DMA IO starts or reached specified line */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOS_IOLN(d)) {
				clr |= CIO2_INT_IOS_IOLN(d);
				if (d == CIO2_DMA_CHAN)
					cio2_queue_event_sof(cio2,
							     cio2->cur_queue);
			}
		int_status &= ~clr;
	}

	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
		/* CSI2 receiver (error) interrupt */
		unsigned int port;
		u32 ie_status;

		ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);

		for (port = 0; port < CIO2_NUM_PORTS; port++) {
			u32 port_status = (ie_status >> (port * 8)) & 0xff;

			cio2_irq_log_port_errs(dev, port, port_status);

			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
				void __iomem *csi_rx_base =
						base + CIO2_REG_PIPE_BASE(port);
				u32 csi2_status;

				csi2_status = readl(csi_rx_base +
						CIO2_REG_IRQCTRL_STATUS);

				cio2_irq_log_irq_errs(dev, port, csi2_status);

				writel(csi2_status,
				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
			}
		}

		writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);

		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
	}

	if (int_status)
		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
}
static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
	struct cio2_device *cio2 = cio2_ptr;
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;
	u32 int_status;

	int_status = readl(base + CIO2_REG_INT_STS);
	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
	if (!int_status)
		return IRQ_NONE;

	do {
		writel(int_status, base + CIO2_REG_INT_STS);
		cio2_irq_handle_once(cio2, int_status);
		int_status = readl(base + CIO2_REG_INT_STS);
		if (int_status)
			dev_dbg(dev, "pending status 0x%x\n", int_status);
	} while (int_status);

	return IRQ_HANDLED;
}
/**************** Videobuf2 interface ****************/

static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
					enum vb2_buffer_state state)
{
	unsigned int i;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		if (q->bufs[i]) {
			atomic_dec(&q->bufs_queued);
			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
					state);
			q->bufs[i] = NULL;
		}
	}
}
static int cio2_vb2_queue_setup(struct vb2_queue *vq,
				unsigned int *num_buffers,
				unsigned int *num_planes,
				unsigned int sizes[],
				struct device *alloc_devs[])
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	unsigned int i;

	if (*num_planes && *num_planes < q->format.num_planes)
		return -EINVAL;

	for (i = 0; i < q->format.num_planes; ++i) {
		if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
			return -EINVAL;
		sizes[i] = q->format.plane_fmt[i].sizeimage;
		alloc_devs[i] = dev;
	}

	*num_planes = q->format.num_planes;
	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);

	/* Initialize buffer queue */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		q->bufs[i] = NULL;
		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
	}
	atomic_set(&q->bufs_queued, 0);

	return 0;
}
834 static int cio2_vb2_buf_init(struct vb2_buffer
*vb
)
836 struct cio2_device
*cio2
= vb2_get_drv_priv(vb
->vb2_queue
);
837 struct device
*dev
= &cio2
->pci_dev
->dev
;
838 struct cio2_buffer
*b
= to_cio2_buffer(vb
);
839 unsigned int pages
= PFN_UP(vb
->planes
[0].length
);
840 unsigned int lops
= DIV_ROUND_UP(pages
+ 1, CIO2_LOP_ENTRIES
);
842 struct sg_dma_page_iter sg_iter
;
845 if (lops
<= 0 || lops
> CIO2_MAX_LOPS
) {
846 dev_err(dev
, "%s: bad buffer size (%i)\n", __func__
,
847 vb
->planes
[0].length
);
848 return -ENOSPC
; /* Should never happen */
851 memset(b
->lop
, 0, sizeof(b
->lop
));
852 /* Allocate LOP table */
853 for (i
= 0; i
< lops
; i
++) {
854 b
->lop
[i
] = dma_alloc_coherent(dev
, PAGE_SIZE
,
855 &b
->lop_bus_addr
[i
], GFP_KERNEL
);
861 sg
= vb2_dma_sg_plane_desc(vb
, 0);
865 if (sg
->nents
&& sg
->sgl
)
866 b
->offset
= sg
->sgl
->offset
;
869 for_each_sg_dma_page(sg
->sgl
, &sg_iter
, sg
->nents
, 0) {
872 b
->lop
[i
][j
] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter
));
874 if (j
== CIO2_LOP_ENTRIES
) {
880 b
->lop
[i
][j
] = PFN_DOWN(cio2
->dummy_page_bus_addr
);
884 dma_free_coherent(dev
, PAGE_SIZE
, b
->lop
[i
], b
->lop_bus_addr
[i
]);
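
/*
 * Rough sizing sketch for the LOP allocation above (illustrative, assuming
 * 4 KiB pages and the default 2496 * 1096-byte plane from cio2_queue_init):
 * pages = PFN_UP(2735616) = 668, so lops = DIV_ROUND_UP(668 + 1, 1024) = 1,
 * i.e. a single LOP page of 1024 entries maps the whole buffer plus the
 * trailing dummy entry.
 */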
/* Transfer buffer ownership to cio2 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q =
		container_of(vb->vb2_queue, struct cio2_queue, vbq);
	struct cio2_buffer *b = to_cio2_buffer(vb);
	struct cio2_fbpt_entry *entry;
	unsigned long flags;
	unsigned int i, j, next = q->bufs_next;
	int bufs_queued = atomic_inc_return(&q->bufs_queued);
	u32 fbpt_rp;

	dev_dbg(dev, "queue buffer %d\n", vb->index);

	/*
	 * This code queues the buffer to the CIO2 DMA engine, which starts
	 * running once streaming has started. It is possible that this code
	 * gets pre-empted due to increased CPU load. Upon this, the driver
	 * does not get an opportunity to queue new buffers to the CIO2 DMA
	 * engine. When the DMA engine encounters an FBPT entry without the
	 * VALID bit set, the DMA engine halts, which requires a restart of
	 * the DMA engine and sensor, to continue streaming.
	 * This is not desired and is highly unlikely given that there are
	 * 32 FBPT entries that the DMA engine needs to process, to run into
	 * an FBPT entry, without the VALID bit set. We try to mitigate this
	 * by disabling interrupts for the duration of this queueing.
	 */
	local_irq_save(flags);

	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
		   & CIO2_CDMARI_FBPT_RP_MASK;

	/*
	 * fbpt_rp is the fbpt entry that the dma is currently working
	 * on, but since it could jump to next entry at any time,
	 * assume that we might already be there.
	 */
	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	if (bufs_queued <= 1 || fbpt_rp == next)
		/* Buffers were drained */
		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		/*
		 * We have allocated CIO2_MAX_BUFFERS circularly for the
		 * hw, the user has requested N buffer queue. The driver
		 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
		 * user queues a buffer, there necessarily is a free buffer.
		 */
		if (!q->bufs[next]) {
			q->bufs[next] = b;
			entry = &q->fbpt[next * CIO2_MAX_LOPS];
			cio2_fbpt_entry_init_buf(cio2, b, entry);
			local_irq_restore(flags);
			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
			for (j = 0; j < vb->num_planes; j++)
				vb2_set_plane_payload(vb, j,
					q->format.plane_fmt[j].sizeimage);
			return;
		}

		dev_dbg(dev, "entry %i was full!\n", next);
		next = (next + 1) % CIO2_MAX_BUFFERS;
	}

	local_irq_restore(flags);
	dev_err(dev, "error: all cio2 entries were full!\n");
	atomic_dec(&q->bufs_queued);
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
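
/*
 * Small illustration of the read-pointer guard above (assuming
 * CIO2_MAX_BUFFERS == 32, matching the "32 FBPT entries" note): if the DMA
 * read pointer reports entry 31 and the queue was drained, the buffer is
 * placed at (31 + 1 + 1) % 32 = 1, i.e. one slot ahead of wherever the
 * engine may already have advanced to.
 */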
/* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_buffer *b = to_cio2_buffer(vb);
	unsigned int i;

	/* Free the LOP table pages */
	for (i = 0; i < CIO2_MAX_LOPS; i++) {
		if (b->lop[i])
			dma_free_coherent(dev, PAGE_SIZE,
					  b->lop[i], b->lop_bus_addr[i]);
	}
}
static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct device *dev = &cio2->pci_dev->dev;
	int r;

	atomic_set(&q->frame_sequence, 0);

	r = pm_runtime_resume_and_get(dev);
	if (r < 0) {
		dev_info(dev, "failed to set power %d\n", r);
		return r;
	}

	r = video_device_pipeline_start(&q->vdev, &q->pipe);
	if (r)
		goto fail_pipeline;

	r = cio2_hw_init(cio2, q);
	if (r)
		goto fail_hw;

	/* Start streaming on sensor */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r)
		goto fail_csi2_subdev;

	cio2->streaming = true;

	return 0;

fail_csi2_subdev:
	cio2_hw_exit(cio2, q);
fail_hw:
	video_device_pipeline_stop(&q->vdev);
fail_pipeline:
	dev_dbg(dev, "failed to start streaming (%d)\n", r);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
	pm_runtime_put(dev);

	return r;
}
*vq
)
1026 struct cio2_queue
*q
= vb2q_to_cio2_queue(vq
);
1027 struct cio2_device
*cio2
= vb2_get_drv_priv(vq
);
1028 struct device
*dev
= &cio2
->pci_dev
->dev
;
1030 if (v4l2_subdev_call(q
->sensor
, video
, s_stream
, 0))
1031 dev_err(dev
, "failed to stop sensor streaming\n");
1033 cio2_hw_exit(cio2
, q
);
1034 synchronize_irq(cio2
->pci_dev
->irq
);
1035 cio2_vb2_return_all_buffers(q
, VB2_BUF_STATE_ERROR
);
1036 video_device_pipeline_stop(&q
->vdev
);
1037 pm_runtime_put(dev
);
1038 cio2
->streaming
= false;
1041 static const struct vb2_ops cio2_vb2_ops
= {
1042 .buf_init
= cio2_vb2_buf_init
,
1043 .buf_queue
= cio2_vb2_buf_queue
,
1044 .buf_cleanup
= cio2_vb2_buf_cleanup
,
1045 .queue_setup
= cio2_vb2_queue_setup
,
1046 .start_streaming
= cio2_vb2_start_streaming
,
1047 .stop_streaming
= cio2_vb2_stop_streaming
,
/**************** V4L2 interface ****************/

static int cio2_v4l2_querycap(struct file *file, void *fh,
			      struct v4l2_capability *cap)
{
	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));

	return 0;
}
static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
			      struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].fourcc;

	return 0;
}
/* The format is validated in cio2_video_link_validate() */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	f->fmt.pix_mp = q->format;

	return 0;
}
static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	const struct ipu3_cio2_fmt *fmt;
	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;

	fmt = cio2_find_format(&mpix->pixelformat, NULL);
	if (!fmt)
		fmt = &formats[0];

	/* Only supports up to 4224x3136 */
	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
		mpix->width = CIO2_IMAGE_MAX_WIDTH;
	if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
		mpix->height = CIO2_IMAGE_MAX_HEIGHT;

	mpix->num_planes = 1;
	mpix->pixelformat = fmt->fourcc;
	mpix->colorspace = V4L2_COLORSPACE_RAW;
	mpix->field = V4L2_FIELD_NONE;
	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
				       mpix->height;

	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;

	return 0;
}
static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	cio2_v4l2_try_fmt(file, fh, f);
	q->format = f->fmt.pix_mp;

	return 0;
}
static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strscpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}
static const struct v4l2_file_operations cio2_v4l2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
	.vidioc_querycap = cio2_v4l2_querycap,
	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_enum_input = cio2_video_enum_input,
	.vidioc_g_input = cio2_video_g_input,
	.vidioc_s_input = cio2_video_s_input,
};
static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
				       struct v4l2_fh *fh,
				       struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_FRAME_SYNC)
		return -EINVAL;

	/* Line number. For now only zero accepted. */
	if (sub->id != 0)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, 0, NULL);
}
static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_mbus_framefmt *format;
	const struct v4l2_mbus_framefmt fmt_default = {
		.code = formats[0].mbus_code,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_RAW,
		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
		.quantization = V4L2_QUANTIZATION_DEFAULT,
		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
	};

	/* Initialize try_fmt */
	format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SINK);
	*format = fmt_default;

	format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SOURCE);
	*format = fmt_default;

	return 0;
}
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);

	mutex_lock(&q->subdev_lock);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		fmt->format = *v4l2_subdev_state_get_format(sd_state,
							    fmt->pad);
	else
		fmt->format = q->subdev_fmt;

	mutex_unlock(&q->subdev_lock);

	return 0;
}
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
	struct v4l2_mbus_framefmt *mbus;
	u32 mbus_code = fmt->format.code;
	unsigned int i;

	/*
	 * Only allow setting sink pad format;
	 * source always propagates from sink
	 */
	if (fmt->pad == CIO2_PAD_SOURCE)
		return cio2_subdev_get_fmt(sd, sd_state, fmt);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		mbus = v4l2_subdev_state_get_format(sd_state, fmt->pad);
	else
		mbus = &q->subdev_fmt;

	fmt->format.code = formats[0].mbus_code;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].mbus_code == mbus_code) {
			fmt->format.code = mbus_code;
			break;
		}
	}

	fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
	fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
	fmt->format.field = V4L2_FIELD_NONE;

	mutex_lock(&q->subdev_lock);
	*mbus = fmt->format;
	mutex_unlock(&q->subdev_lock);

	return 0;
}
static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
				      struct v4l2_subdev_state *sd_state,
				      struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	code->code = formats[code->index].mbus_code;

	return 0;
}
static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
						struct v4l2_subdev_format *fmt)
{
	if (is_media_entity_v4l2_subdev(pad->entity)) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(pad->entity);

		memset(fmt, 0, sizeof(*fmt));
		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
	}

	return -EINVAL;
}
static int cio2_video_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vd = media_entity_to_video_device(entity);
	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
	struct cio2_device *cio2 = video_get_drvdata(vd);
	struct device *dev = &cio2->pci_dev->dev;
	struct v4l2_subdev_format source_fmt;
	int ret;

	if (!media_pad_remote_pad_first(entity->pads)) {
		dev_info(dev, "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != q->format.width ||
	    source_fmt.format.height != q->format.height) {
		dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
			q->format.width, q->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
		return -EINVAL;

	return 0;
}
static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
	.subscribe_event = cio2_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
	.open = cio2_subdev_open,
};

static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
	.link_validate = v4l2_subdev_link_validate_default,
	.get_fmt = cio2_subdev_get_fmt,
	.set_fmt = cio2_subdev_set_fmt,
	.enum_mbus_code = cio2_subdev_enum_mbus_code,
};

static const struct v4l2_subdev_ops cio2_subdev_ops = {
	.core = &cio2_subdev_core_ops,
	.pad = &cio2_subdev_pad_ops,
};
/******* V4L2 sub-device asynchronous registration callbacks***********/

struct sensor_async_subdev {
	struct v4l2_async_connection asd;
	struct csi2_bus_info csi2;
};

#define to_sensor_asd(__asd) \
	container_of_const(__asd, struct sensor_async_subdev, asd)
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_connection *asd)
{
	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
	struct cio2_queue *q;
	int ret;

	if (cio2->queue[s_asd->csi2.port].sensor)
		return -EBUSY;

	ret = ipu_bridge_instantiate_vcm(sd->dev);
	if (ret)
		return ret;

	q = &cio2->queue[s_asd->csi2.port];

	q->csi2 = s_asd->csi2;
	q->sensor = sd;
	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);

	return 0;
}
/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_connection *asd)
{
	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);

	cio2->queue[s_asd->csi2.port].sensor = NULL;
}
/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct sensor_async_subdev *s_asd;
	struct v4l2_async_connection *asd;
	struct cio2_queue *q;
	int ret;

	list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
		s_asd = to_sensor_asd(asd);
		q = &cio2->queue[s_asd->csi2.port];

		ret = v4l2_create_fwnode_links_to_pad(asd->sd,
						      &q->subdev_pads[CIO2_PAD_SINK], 0);
		if (ret)
			return ret;
	}

	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
}

static const struct v4l2_async_notifier_operations cio2_async_ops = {
	.bound = cio2_notifier_bound,
	.unbind = cio2_notifier_unbind,
	.complete = cio2_notifier_complete,
};
static int cio2_parse_firmware(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;
	unsigned int i;
	int ret;

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		struct v4l2_fwnode_endpoint vep = {
			.bus_type = V4L2_MBUS_CSI2_DPHY
		};
		struct sensor_async_subdev *s_asd;
		struct fwnode_handle *ep;

		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
						FWNODE_GRAPH_ENDPOINT_NEXT);
		if (!ep)
			continue;

		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
		if (ret)
			goto err_parse;

		s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
							struct
							sensor_async_subdev);
		if (IS_ERR(s_asd)) {
			ret = PTR_ERR(s_asd);
			goto err_parse;
		}

		s_asd->csi2.port = vep.base.port;
		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;

		fwnode_handle_put(ep);

		continue;

err_parse:
		fwnode_handle_put(ep);
		return ret;
	}

	/*
	 * Proceed even without sensors connected to allow the device to
	 * suspend.
	 */
	cio2->notifier.ops = &cio2_async_ops;
	ret = v4l2_async_nf_register(&cio2->notifier);
	if (ret)
		dev_err(dev, "failed to register async notifier : %d\n", ret);

	return ret;
}
1488 static const struct media_entity_operations cio2_media_ops
= {
1489 .link_validate
= v4l2_subdev_link_validate
,
1492 static const struct media_entity_operations cio2_video_entity_ops
= {
1493 .link_validate
= cio2_video_link_validate
,
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const u32 default_width = 1936;
	static const u32 default_height = 1096;
	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
	struct device *dev = &cio2->pci_dev->dev;
	struct video_device *vdev = &q->vdev;
	struct vb2_queue *vbq = &q->vbq;
	struct v4l2_subdev *subdev = &q->subdev;
	struct v4l2_mbus_framefmt *fmt;
	int r;

	/* Initialize miscellaneous variables */
	mutex_init(&q->lock);
	mutex_init(&q->subdev_lock);

	/* Initialize formats to default values */
	fmt = &q->subdev_fmt;
	fmt->width = default_width;
	fmt->height = default_height;
	fmt->code = dflt_fmt.mbus_code;
	fmt->field = V4L2_FIELD_NONE;

	q->format.width = default_width;
	q->format.height = default_height;
	q->format.pixelformat = dflt_fmt.fourcc;
	q->format.colorspace = V4L2_COLORSPACE_RAW;
	q->format.field = V4L2_FIELD_NONE;
	q->format.num_planes = 1;
	q->format.plane_fmt[0].bytesperline =
				cio2_bytesperline(q->format.width);
	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
					   q->format.height;

	/* Initialize fbpt */
	r = cio2_fbpt_init(cio2, q);
	if (r)
		goto fail_fbpt;

	/* Initialize media entities */
	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
		MEDIA_PAD_FL_MUST_CONNECT;
	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	subdev->entity.ops = &cio2_media_ops;
	subdev->internal_ops = &cio2_subdev_internal_ops;
	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
	if (r) {
		dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
		goto fail_subdev_media_entity;
	}

	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	vdev->entity.ops = &cio2_video_entity_ops;
	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
	if (r) {
		dev_err(dev, "failed initialize videodev media entity (%d)\n",
			r);
		goto fail_vdev_media_entity;
	}

	/* Initialize subdev */
	v4l2_subdev_init(subdev, &cio2_subdev_ops);
	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	subdev->owner = THIS_MODULE;
	snprintf(subdev->name, sizeof(subdev->name),
		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	v4l2_set_subdevdata(subdev, cio2);
	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
	if (r) {
		dev_err(dev, "failed initialize subdev (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vbq */
	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	vbq->ops = &cio2_vb2_ops;
	vbq->mem_ops = &vb2_dma_sg_memops;
	vbq->buf_struct_size = sizeof(struct cio2_buffer);
	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	vbq->min_queued_buffers = 1;
	vbq->drv_priv = cio2;
	vbq->lock = &q->lock;
	r = vb2_queue_init(vbq);
	if (r) {
		dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vdev */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
	vdev->release = video_device_release_empty;
	vdev->fops = &cio2_v4l2_fops;
	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
	vdev->lock = &cio2->lock;
	vdev->v4l2_dev = &cio2->v4l2_dev;
	vdev->queue = &q->vbq;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, cio2);
	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (r) {
		dev_err(dev, "failed to register video device (%d)\n", r);
		goto fail_vdev;
	}

	/* Create link from CIO2 subdev to output node */
	r = media_create_pad_link(
		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
	if (r)
		goto fail_link;

	return 0;

fail_link:
	vb2_video_unregister_device(&q->vdev);
fail_vdev:
	v4l2_device_unregister_subdev(subdev);
fail_subdev:
	media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
	media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
	cio2_fbpt_exit(q, dev);
fail_fbpt:
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);

	return r;
}
static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	vb2_video_unregister_device(&q->vdev);
	media_entity_cleanup(&q->vdev.entity);
	v4l2_device_unregister_subdev(&q->subdev);
	media_entity_cleanup(&q->subdev.entity);
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);
}
static int cio2_queues_init(struct cio2_device *cio2)
{
	int i, r;

	for (i = 0; i < CIO2_QUEUES; i++) {
		r = cio2_queue_init(cio2, &cio2->queue[i]);
		if (r)
			break;
	}

	if (i == CIO2_QUEUES)
		return 0;

	for (i--; i >= 0; i--)
		cio2_queue_exit(cio2, &cio2->queue[i]);

	return r;
}

static void cio2_queues_exit(struct cio2_device *cio2)
{
	unsigned int i;

	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
}
/**************** PCI interface ****************/

static int cio2_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct device *dev = &pci_dev->dev;
	struct cio2_device *cio2;
	int r;

	/*
	 * On some platforms no connections to sensors are defined in firmware,
	 * if the device has no endpoints then we can try to build those as
	 * software_nodes parsed from SSDB.
	 */
	r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
	if (r)
		return r;

	cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
	if (!cio2)
		return -ENOMEM;
	cio2->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(dev, "failed to remap I/O memory (%d)\n", r);
		return r;
	}

	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];

	pci_set_drvdata(pci_dev, cio2);

	pci_set_master(pci_dev);

	r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
	if (r) {
		dev_err(dev, "failed to set DMA mask (%d)\n", r);
		return r;
	}

	r = pci_enable_msi(pci_dev);
	if (r) {
		dev_err(dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

	r = cio2_fbpt_init_dummy(cio2);
	if (r)
		return r;

	mutex_init(&cio2->lock);

	cio2->media_dev.dev = dev;
	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
		sizeof(cio2->media_dev.model));
	cio2->media_dev.hw_revision = 0;

	media_device_init(&cio2->media_dev);
	r = media_device_register(&cio2->media_dev);
	if (r < 0)
		goto fail_mutex_destroy;

	cio2->v4l2_dev.mdev = &cio2->media_dev;
	r = v4l2_device_register(dev, &cio2->v4l2_dev);
	if (r) {
		dev_err(dev, "failed to register V4L2 device (%d)\n", r);
		goto fail_media_device_unregister;
	}

	r = cio2_queues_init(cio2);
	if (r)
		goto fail_v4l2_device_unregister;

	v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);

	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
			     CIO2_NAME, cio2);
	if (r) {
		dev_err(dev, "failed to request IRQ (%d)\n", r);
		goto fail_clean_notifier;
	}

	/* Register notifier for subdevices we care */
	r = cio2_parse_firmware(cio2);
	if (r)
		goto fail_clean_notifier;

	pm_runtime_put_noidle(dev);
	pm_runtime_allow(dev);

	return 0;

fail_clean_notifier:
	v4l2_async_nf_unregister(&cio2->notifier);
	v4l2_async_nf_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
	v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
fail_mutex_destroy:
	mutex_destroy(&cio2->lock);
	cio2_fbpt_exit_dummy(cio2);

	return r;
}
static void cio2_pci_remove(struct pci_dev *pci_dev)
{
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);

	media_device_unregister(&cio2->media_dev);
	v4l2_async_nf_unregister(&cio2->notifier);
	v4l2_async_nf_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
	cio2_fbpt_exit_dummy(cio2);
	v4l2_device_unregister(&cio2->v4l2_dev);
	media_device_cleanup(&cio2->media_dev);
	mutex_destroy(&cio2->lock);

	pm_runtime_forbid(&pci_dev->dev);
	pm_runtime_get_noresume(&pci_dev->dev);
}
static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;

	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime suspend.\n");

	return 0;
}

static int __maybe_unused cio2_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;

	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime resume.\n");

	return 0;
}
/*
 * Helper function to advance all the elements of a circular buffer by "start"
 * positions.
 */
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{
	struct {
		size_t begin, end;
	} arr[2] = {
		{ 0, start - 1 },
		{ start, elems - 1 },
	};

#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)

	/* Loop as long as we have out-of-place entries */
	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
		size_t size0, i;

		/*
		 * Find the number of entries that can be arranged on this
		 * iteration.
		 */
		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));

		/* Swap the entries in two parts of the array. */
		for (i = 0; i < size0; i++) {
			u8 *d = ptr + elem_size * (arr[1].begin + i);
			u8 *s = ptr + elem_size * (arr[0].begin + i);
			size_t j;

			for (j = 0; j < elem_size; j++)
				swap(d[j], s[j]);
		}

		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
			/* The end of the first array remains unarranged. */
			arr[0].begin += size0;
		} else {
			/*
			 * The first array is fully arranged so we proceed
			 * handling the next one.
			 */
			arr[0].begin = arr[1].begin;
			arr[0].end = arr[1].begin + size0 - 1;
			arr[1].begin += size0;
		}
	}
}
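
/*
 * Illustrative trace of arrange() (a small sketch, assuming elem_size == 1,
 * elems == 5 and start == 2): the buffer {A, B, C, D, E} is conceptually
 * split into [A, B] and [C, D, E]; repeated block swaps rotate it into
 * {C, D, E, A, B}, i.e. the element that used to sit at index "start" now
 * sits at index 0.
 */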
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{
	unsigned int i, j;

	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
	     i++, j = (j + 1) % CIO2_MAX_BUFFERS)
		if (q->bufs[j])
			break;

	if (i == CIO2_MAX_BUFFERS)
		return;

	if (j) {
		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
			CIO2_MAX_BUFFERS, j);
		arrange(q->bufs, sizeof(struct cio2_buffer *),
			CIO2_MAX_BUFFERS, j);
	}

	/*
	 * DMA clears the valid bit when accessing the buffer.
	 * When stopping stream in suspend callback, some of the buffers
	 * may be in invalid state. After resume, when DMA meets the invalid
	 * buffer, it will halt and stop receiving new data.
	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
	 */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}
static int __maybe_unused cio2_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;
	int r;

	dev_dbg(dev, "cio2 suspend\n");
	if (!cio2->streaming)
		return 0;

	/* Stop streaming on the sensor first, then shut down the receiver */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
	if (r) {
		dev_err(dev, "failed to stop sensor streaming\n");
		return r;
	}

	cio2_hw_exit(cio2, q);
	synchronize_irq(pci_dev->irq);

	pm_runtime_force_suspend(dev);

	/*
	 * Upon resume, hw starts to process the fbpt entries from beginning,
	 * so relocate the queued buffs to the fbpt head before suspend.
	 */
	cio2_fbpt_rearrange(cio2, q);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}
static int __maybe_unused cio2_resume(struct device *dev)
{
	struct cio2_device *cio2 = dev_get_drvdata(dev);
	struct cio2_queue *q = cio2->cur_queue;
	int r;

	dev_dbg(dev, "cio2 resume\n");
	if (!cio2->streaming)
		return 0;

	r = pm_runtime_force_resume(dev);
	if (r < 0) {
		dev_err(dev, "failed to set power %d\n", r);
		return r;
	}

	r = cio2_hw_init(cio2, q);
	if (r) {
		dev_err(dev, "fail to init cio2 hw\n");
		return r;
	}

	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r) {
		dev_err(dev, "fail to start sensor streaming\n");
		cio2_hw_exit(cio2, q);
	}

	return r;
}
= {
1973 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend
, &cio2_runtime_resume
, NULL
)
1974 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend
, &cio2_resume
)
1977 static const struct pci_device_id cio2_pci_id_table
[] = {
1978 { PCI_DEVICE(PCI_VENDOR_ID_INTEL
, CIO2_PCI_ID
) },
1982 MODULE_DEVICE_TABLE(pci
, cio2_pci_id_table
);
1984 static struct pci_driver cio2_pci_driver
= {
1986 .id_table
= cio2_pci_id_table
,
1987 .probe
= cio2_pci_probe
,
1988 .remove
= cio2_pci_remove
,
1994 module_pci_driver(cio2_pci_driver
);
1996 MODULE_AUTHOR("Tuukka Toivonen");
1997 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
1998 MODULE_AUTHOR("Jian Xu Zheng");
1999 MODULE_AUTHOR("Yuning Pu");
2000 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2001 MODULE_LICENSE("GPL v2");
2002 MODULE_DESCRIPTION("IPU3 CIO2 driver");
2003 MODULE_IMPORT_NS(INTEL_IPU_BRIDGE
);