/*
 * Copyright (c) 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Based partially on Intel IPU4 driver written by
 *  Sakari Ailus <sakari.ailus@linux.intel.com>
 *  Samu Onkalo <samu.onkalo@intel.com>
 *  Jouni Högander <jouni.hogander@intel.com>
 *  Jouni Ukkonen <jouni.ukkonen@intel.com>
 *  Antti Laakso <antti.laakso@intel.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>

#include "ipu3-cio2.h"
struct ipu3_cio2_fmt {
	u32 mbus_code;
	u32 fourcc;
	u8 mipicode;
};

/*
 * These are the raw formats used in Intel's third generation of
 * Image Processing Unit, known as IPU3.
 * 10-bit raw Bayer packed: 32 bytes for every 25 pixels, with the
 * last 6 bits unused.
 */
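/*
 * Illustrative note (not part of the original driver comment): 25 pixels
 * at 10 bits each occupy 250 bits = 31.25 bytes, so every packed group is
 * padded up to 32 bytes, which is where the 6 unused bits come from.
 */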
static const struct ipu3_cio2_fmt formats[] = {
	{	/* put default entry at beginning */
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
	},
};
/*
 * cio2_find_format - lookup color format by fourcc and/or media bus code
 * @pixelformat: fourcc to match, ignored if null
 * @mbus_code: media bus code to match, ignored if null
 */
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
						    const u32 *mbus_code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (pixelformat && *pixelformat != formats[i].fourcc)
			continue;
		if (mbus_code && *mbus_code != formats[i].mbus_code)
			continue;

		return &formats[i];
	}

	return NULL;
}
static inline u32 cio2_bytesperline(const unsigned int width)
{
	/*
	 * 64 bytes for every 50 pixels, the line length
	 * in bytes is a multiple of 64 (line end alignment).
	 */
	return DIV_ROUND_UP(width, 50) * 64;
}
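/*
 * Illustrative example (not part of the original source): for the default
 * width of 1936 pixels, DIV_ROUND_UP(1936, 50) = 39 groups of 64 bytes,
 * i.e. 39 * 64 = 2496 bytes per line.
 */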
/**************** FBPT operations ****************/
static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
	if (cio2->dummy_lop) {
		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
				  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
		cio2->dummy_lop = NULL;
	}
	if (cio2->dummy_page) {
		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
				  cio2->dummy_page, cio2->dummy_page_bus_addr);
		cio2->dummy_page = NULL;
	}
}
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
	unsigned int i;

	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
					      CIO2_PAGE_SIZE,
					      &cio2->dummy_page_bus_addr,
					      GFP_KERNEL);
	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
					     CIO2_PAGE_SIZE,
					     &cio2->dummy_lop_bus_addr,
					     GFP_KERNEL);
	if (!cio2->dummy_page || !cio2->dummy_lop) {
		cio2_fbpt_exit_dummy(cio2);
		return -ENOMEM;
	}
	/*
	 * A List of Pointers (LOP) contains 1024 32-bit pointers, each
	 * referring to a 4 KiB page. Initialize each entry to the
	 * dummy_page bus base address.
	 */
	for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
		cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;

	return 0;
}
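/*
 * Illustrative note (not part of the original source): with a 4096-byte
 * page and 32-bit entries, one LOP page holds 4096 / 4 = 1024 page
 * pointers, so a single LOP can map 1024 * 4 KiB = 4 MiB of buffer memory.
 */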
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	/*
	 * The CPU first initializes some fields in the FBPT and then sets
	 * the VALID bit. This barrier ensures that the DMA (device) does
	 * not see the VALID bit enabled before the other fields are
	 * initialized; otherwise it could lead to havoc.
	 */
	dma_wmb();

	/*
	 * Request interrupts for start and completion.
	 * The valid bit is applicable only to the first entry.
	 */
	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}
/* Initialize fbpt entries to point to the dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
				       struct cio2_fbpt_entry
				       entry[CIO2_MAX_LOPS])
{
	unsigned int i;

	entry[0].first_entry.first_page_offset = 0;
	entry[1].second_entry.num_of_pages =
		CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
	entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;

	for (i = 0; i < CIO2_MAX_LOPS; i++)
		entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

	cio2_fbpt_entry_enable(cio2, entry);
}
/* Initialize fbpt entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
				     struct cio2_buffer *b,
				     struct cio2_fbpt_entry
				     entry[CIO2_MAX_LOPS])
{
	struct vb2_buffer *vb = &b->vbb.vb2_buf;
	unsigned int length = vb->planes[0].length;
	unsigned int i;
	int remaining;

	entry[0].first_entry.first_page_offset = b->offset;
	remaining = length + entry[0].first_entry.first_page_offset;
	entry[1].second_entry.num_of_pages =
		DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
	/*
	 * last_page_available_bytes has the offset of the last byte in the
	 * last page which is still accessible by DMA. DMA cannot access
	 * beyond this point. Valid range for this is from 0 to 4095.
	 * 0 indicates 1st byte in the page is DMA accessible.
	 * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
	 * is available for DMA transfer.
	 */
	entry[1].second_entry.last_page_available_bytes =
			(remaining & ~PAGE_MASK) ?
				(remaining & ~PAGE_MASK) - 1 :
				CIO2_PAGE_SIZE - 1;

	/* Fill FBPT */
	remaining = length;
	i = 0;
	while (remaining > 0) {
		entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
		remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
		entry++;
		i++;
	}

	/*
	 * The first unused FBPT entry should still point to a valid LOP.
	 */
	entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

	cio2_fbpt_entry_enable(cio2, entry);
}
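/*
 * Illustrative example (not part of the original source), assuming the
 * default 1936x1096 format: the plane length is 2496 * 1096 = 2735616
 * bytes. With a zero first_page_offset, num_of_pages becomes
 * DIV_ROUND_UP(2735616, 4096) = 668, the remainder in the last page is
 * 3584 bytes, and last_page_available_bytes is therefore 3584 - 1 = 3583.
 */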
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;

	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
				     GFP_KERNEL);
	if (!q->fbpt)
		return -ENOMEM;

	memset(q->fbpt, 0, CIO2_FBPT_SIZE);

	return 0;
}
static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}
/**************** CSI2 hardware setup ****************/

/*
 * The CSI2 receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *     register value = (A/1e9 + B * UI) / COUNT_ACC
 * where
 *     UI = 1 / (2 * F) in seconds
 *     COUNT_ACC = counter accuracy in seconds
 *     For IPU3 COUNT_ACC = 0.0625
 *
 * A and B are coefficients from the table below, depending on whether
 * the register minimum or maximum value is calculated.
 *
 *                                     Minimum      Maximum
 *                                     A      B     A      B
 * reg_rx_csi_dly_cnt_termen_clane      0     0    38      0
 * reg_rx_csi_dly_cnt_settle_clane     95    -8   300    -16
 * reg_rx_csi_dly_cnt_termen_dlane0     0     0    35      4
 * reg_rx_csi_dly_cnt_settle_dlane0    85    -2   145     -6
 * reg_rx_csi_dly_cnt_termen_dlane1     0     0    35      4
 * reg_rx_csi_dly_cnt_settle_dlane1    85    -2   145     -6
 * reg_rx_csi_dly_cnt_termen_dlane2     0     0    35      4
 * reg_rx_csi_dly_cnt_settle_dlane2    85    -2   145     -6
 * reg_rx_csi_dly_cnt_termen_dlane3     0     0    35      4
 * reg_rx_csi_dly_cnt_settle_dlane3    85    -2   145     -6
 *
 * We use the minimum values of both A and B.
 */

/*
 * shift for keeping the value range suitable for 32-bit integer arithmetic
 */
#define LIMIT_SHIFT	8
static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
	const u32 accinv = 16;		/* inverse of the counter resolution */
	const u32 uiinv = 500000000;	/* 1e9 / 2 */
	s32 r;

	freq >>= LIMIT_SHIFT;

	if (WARN_ON(freq <= 0 || freq > S32_MAX))
		return def;
	/*
	 * b could be 0, -2 or -8, so |accinv * b| is always
	 * less than (1 << ds) and thus |r| < 500000000.
	 */
	r = accinv * b * (uiinv >> LIMIT_SHIFT);
	r = r / (s32)freq;
	/* max value of a is 95 */
	r += accinv * a;

	return r;
}
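/*
 * Illustrative example (not part of the original source): for a sensor
 * link frequency of 400 MHz, UI = 1.25 ns. The clock-lane settle minimum
 * coefficients (A = 95, B = -8) then give (95 - 8 * 1.25) / 0.0625 = 1360,
 * which matches what cio2_rx_timing() computes with its shifted
 * fixed-point arithmetic: 16 * (-8) * (uiinv >> 8) / (freq >> 8) = -160,
 * plus 16 * 95 = 1520, i.e. 1360.
 */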
/* Calculate the delay value for termination enable of clock lane HS Rx */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
				 struct cio2_csi2_timing *timing)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
	struct v4l2_ctrl *link_freq;
	s64 freq;
	int r;

	link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
	if (!link_freq) {
		dev_err(dev, "failed to find LINK_FREQ\n");
		return -EPIPE;
	}

	qm.index = v4l2_ctrl_g_ctrl(link_freq);
	r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
	if (r) {
		dev_err(dev, "failed to get menu item\n");
		return r;
	}

	if (!qm.value) {
		dev_err(dev, "error invalid link_freq\n");
		return -EINVAL;
	}
	freq = qm.value;

	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);

	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);

	return 0;
}
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const int NUM_VCS = 4;
	static const int SID;	/* Stream id */
	static const int ENTRY;
	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
						   CIO2_FBPT_SUBENTRY_UNIT);
	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
	const struct ipu3_cio2_fmt *fmt;
	void __iomem *const base = cio2->base;
	u8 lanes, csi2bus = q->csi2.port;
	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
	struct cio2_csi2_timing timing;
	int i, r;

	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
	if (!fmt)
		return -EINVAL;

	lanes = q->csi2.lanes;

	r = cio2_csi2_calc_timing(cio2, q, &timing);
	if (r)
		return r;

	writel(timing.clk_termen, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
	writel(timing.clk_settle, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));

	for (i = 0; i < lanes; i++) {
		writel(timing.dat_termen, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
		writel(timing.dat_settle, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
	}

	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
	       CIO2_PBM_WMCTRL1_MID1_2CK |
	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
	       CIO2_PBM_ARB_CTRL_LE_EN |
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
	       base + CIO2_REG_PBM_ARB_CTRL);
	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);

	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);

	/* Configure MIPI backend */
	for (i = 0; i < NUM_VCS; i++)
		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));

	/* There are 16 short packet LUT entries */
	for (i = 0; i < 16; i++)
		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);

	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);

	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
	       base + CIO2_REG_INT_EN);

	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));

	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
	writel(CIO2_CGC_PRIM_TGE |
	       CIO2_CGC_CSI2_INTERFRAME_TGE |
	       CIO2_CGC_CSI2_PORT_DCGE |
	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL01);
	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL23);

	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
		writel(0, base + CIO2_REG_CDMABA(i));
		writel(0, base + CIO2_REG_CDMAC0(i));
		writel(0, base + CIO2_REG_CDMAC1(i));
	}

	writel(q->fbpt_bus_addr >> PAGE_SHIFT,
	       base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
	       CIO2_CDMAC0_DMA_INTR_ON_FE |
	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
	       CIO2_CDMAC0_DMA_INTR_ON_FS |
	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));

	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));

	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);

	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));

	/* Clear interrupts */
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
	writel(~0, base + CIO2_REG_INT_STS);

	/* Enable devices, starting from the last device in the pipe */
	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);

	return 0;
}
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	void __iomem *base = cio2->base;
	unsigned int i, maxloops = 1000;

	/* Disable CSI receiver and MIPI backend devices */
	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);

	/* Halt DMA */
	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
	do {
		if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
		    CIO2_CDMAC0_DMA_HALTED)
			break;
		usleep_range(1000, 2000);
	} while (--maxloops);
	if (!maxloops)
		dev_err(&cio2->pci_dev->dev,
			"DMA %i can not be halted\n", CIO2_DMA_CHAN);

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
	}
}
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q = cio2->cur_queue;
	int buffers_found = 0;
	u64 ns = ktime_get_ns();

	if (dma_chan >= CIO2_QUEUES) {
		dev_err(dev, "bad DMA channel %i\n", dma_chan);
		return;
	}

	/* Find out which buffer(s) are ready */
	do {
		struct cio2_fbpt_entry *const entry =
			&q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
		struct cio2_buffer *b;

		if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
			break;

		b = q->bufs[q->bufs_first];
		if (b) {
			unsigned int bytes = entry[1].second_entry.num_of_bytes;

			q->bufs[q->bufs_first] = NULL;
			atomic_dec(&q->bufs_queued);
			dev_dbg(&cio2->pci_dev->dev,
				"buffer %i done\n", b->vbb.vb2_buf.index);

			b->vbb.vb2_buf.timestamp = ns;
			b->vbb.field = V4L2_FIELD_NONE;
			b->vbb.sequence = atomic_read(&q->frame_sequence);
			if (b->vbb.vb2_buf.planes[0].length != bytes)
				dev_warn(dev, "buffer length is %d received %d\n",
					 b->vbb.vb2_buf.planes[0].length,
					 bytes);
			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
		}
		atomic_inc(&q->frame_sequence);
		cio2_fbpt_entry_init_dummy(cio2, entry);
		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
		buffers_found++;
	} while (1);

	if (buffers_found == 0)
		dev_warn(&cio2->pci_dev->dev,
			 "no ready buffers found on DMA channel %u\n",
			 dma_chan);
}
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
	/*
	 * For the user space camera control algorithms it is essential
	 * to know when the reception of a frame has begun. That's often
	 * the best timing information to get from the hardware.
	 */
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
	};

	v4l2_event_queue(q->subdev.devnode, &event);
}
static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
};
static const char *const cio2_port_errs[] = {
	"DPHY not recoverable",
	"ECC not recoverable",
};
static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
	struct cio2_device *cio2 = cio2_ptr;
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;
	u32 int_status, int_clear;

	int_status = readl(base + CIO2_REG_INT_STS);
	int_clear = int_status;

	if (int_status & CIO2_INT_IOOE) {
		/*
		 * Interrupt on Output Error:
		 * 1) SRAM is full and FS received, or
		 * 2) An invalid bit detected by DMA.
		 */
		u32 oe_status, oe_clear;

		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
		oe_status = oe_clear;

		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
			dev_err(dev, "DMA output error: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
		}
		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
				>> CIO2_INT_EXT_OE_OES_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
		}
		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
		if (oe_status)
			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
				 oe_status);
		int_status &= ~CIO2_INT_IOOE;
	}

	if (int_status & CIO2_INT_IOC_MASK) {
		/* DMA IO done -- frame ready */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOC(d)) {
				clr |= CIO2_INT_IOC(d);
				cio2_buffer_done(cio2, d);
			}
		int_status &= ~clr;
	}

	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
		/* DMA IO starts or reached specified line */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOS_IOLN(d)) {
				clr |= CIO2_INT_IOS_IOLN(d);
				if (d == CIO2_DMA_CHAN)
					cio2_queue_event_sof(cio2,
							     cio2->cur_queue);
			}
		int_status &= ~clr;
	}

	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
		/* CSI2 receiver (error) interrupt */
		u32 ie_status, ie_clear;
		unsigned int port, i;

		ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
		ie_status = ie_clear;

		for (port = 0; port < CIO2_NUM_PORTS; port++) {
			u32 port_status = (ie_status >> (port * 8)) & 0xff;
			u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
			void __iomem *const csi_rx_base =
						base + CIO2_REG_PIPE_BASE(port);

			while (port_status & err_mask) {
				i = ffs(port_status) - 1;
				dev_err(dev, "port %i error %s\n",
					port, cio2_port_errs[i]);
				ie_status &= ~BIT(port * 8 + i);
				port_status &= ~BIT(i);
			}

			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
				u32 csi2_status, csi2_clear;

				csi2_status = readl(csi_rx_base +
						CIO2_REG_IRQCTRL_STATUS);
				csi2_clear = csi2_status;
				err_mask =
					BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;

				while (csi2_status & err_mask) {
					i = ffs(csi2_status) - 1;
					dev_warn(dev,
						 "CSI-2 receiver port %i: %s\n",
						 port, cio2_irq_errs[i]);
					csi2_status &= ~BIT(i);
				}

				writel(csi2_clear,
				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
				if (csi2_status)
					dev_warn(dev,
						 "unknown CSI2 error 0x%x on port %i\n",
						 csi2_status, port);

				ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
			}
		}

		writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
		if (ie_status)
			dev_warn(dev, "unknown interrupt 0x%x on IE\n",
				 ie_status);

		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
	}

	writel(int_clear, base + CIO2_REG_INT_STS);
	if (int_status)
		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);

	return IRQ_HANDLED;
}
/**************** Videobuf2 interface ****************/

static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
					enum vb2_buffer_state state)
{
	unsigned int i;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		if (q->bufs[i]) {
			atomic_dec(&q->bufs_queued);
			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
					state);
		}
	}
}
static int cio2_vb2_queue_setup(struct vb2_queue *vq,
				unsigned int *num_buffers,
				unsigned int *num_planes,
				unsigned int sizes[],
				struct device *alloc_devs[])
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	unsigned int i;

	*num_planes = q->format.num_planes;

	for (i = 0; i < *num_planes; ++i) {
		sizes[i] = q->format.plane_fmt[i].sizeimage;
		alloc_devs[i] = &cio2->pci_dev->dev;
	}

	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);

	/* Initialize buffer queue */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		q->bufs[i] = NULL;
		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
	}
	atomic_set(&q->bufs_queued, 0);

	return 0;
}
/* Called after each buffer is allocated */
static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	static const unsigned int entries_per_page =
		CIO2_PAGE_SIZE / sizeof(u32);
	unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
	unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
	struct sg_table *sg;
	struct sg_page_iter sg_iter;
	int i, j;

	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
			vb->planes[0].length);
		return -ENOSPC;		/* Should never happen */
	}

	memset(b->lop, 0, sizeof(b->lop));
	/* Allocate LOP table */
	for (i = 0; i < lops; i++) {
		b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
					       &b->lop_bus_addr[i], GFP_KERNEL);
		if (!b->lop[i])
			goto fail;
	}

	/* Fill LOP */
	sg = vb2_dma_sg_plane_desc(vb, 0);
	if (!sg)
		return -ENOMEM;

	if (sg->nents && sg->sgl)
		b->offset = sg->sgl->offset;

	i = j = 0;
	for_each_sg_page(sg->sgl, &sg_iter, sg->nents, 0) {
		b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
		j++;
		if (j == entries_per_page) {
			i++;
			j = 0;
		}
	}

	b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;

	return 0;
fail:
	for (i--; i >= 0; i--)
		dma_free_coherent(dev, CIO2_PAGE_SIZE,
				  b->lop[i], b->lop_bus_addr[i]);
	return -ENOMEM;
}
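/*
 * Illustrative example (not part of the original source): a 2735616-byte
 * plane (1936x1096 at 2496 bytes per line) spans 668 pages, so with 1024
 * entries per LOP page a single LOP table is enough:
 * DIV_ROUND_UP(668 + 1, 1024) = 1.
 */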
/* Transfer buffer ownership to cio2 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct cio2_queue *q =
		container_of(vb->vb2_queue, struct cio2_queue, vbq);
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	struct cio2_fbpt_entry *entry;
	unsigned long flags;
	unsigned int i, j, next = q->bufs_next;
	int bufs_queued = atomic_inc_return(&q->bufs_queued);
	u32 fbpt_rp;

	dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);

	/*
	 * This code queues the buffer to the CIO2 DMA engine, which starts
	 * running once streaming has started. It is possible that this code
	 * gets pre-empted due to increased CPU load. Upon this, the driver
	 * does not get an opportunity to queue new buffers to the CIO2 DMA
	 * engine. When the DMA engine encounters an FBPT entry without the
	 * VALID bit set, the DMA engine halts, which requires a restart of
	 * the DMA engine and sensor, to continue streaming.
	 * This is not desired and is highly unlikely given that there are
	 * 32 FBPT entries that the DMA engine needs to process, to run into
	 * an FBPT entry without the VALID bit set. We try to mitigate this
	 * by disabling interrupts for the duration of this queueing.
	 */
	local_irq_save(flags);

	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
		   & CIO2_CDMARI_FBPT_RP_MASK;

	/*
	 * fbpt_rp is the fbpt entry that the dma is currently working
	 * on, but since it could jump to the next entry at any time,
	 * assume that we might already be there.
	 */
	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	if (bufs_queued <= 1 || fbpt_rp == next)
		/* Buffers were drained */
		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		/*
		 * We have allocated CIO2_MAX_BUFFERS circularly for the
		 * hw, the user has requested N buffer queue. The driver
		 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
		 * user queues a buffer, there necessarily is a free buffer.
		 */
		if (!q->bufs[next]) {
			q->bufs[next] = b;
			entry = &q->fbpt[next * CIO2_MAX_LOPS];
			cio2_fbpt_entry_init_buf(cio2, b, entry);
			local_irq_restore(flags);
			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
			for (j = 0; j < vb->num_planes; j++)
				vb2_set_plane_payload(vb, j,
					q->format.plane_fmt[j].sizeimage);
			return;
		}

		dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
		next = (next + 1) % CIO2_MAX_BUFFERS;
	}

	local_irq_restore(flags);
	dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
	atomic_dec(&q->bufs_queued);
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
/* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	unsigned int i;

	/* Free LOP table */
	for (i = 0; i < CIO2_MAX_LOPS; i++) {
		if (b->lop[i])
			dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
					  b->lop[i], b->lop_bus_addr[i]);
	}
}
static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	int r;

	atomic_set(&q->frame_sequence, 0);

	r = pm_runtime_get_sync(&cio2->pci_dev->dev);
	if (r < 0) {
		dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
		pm_runtime_put_noidle(&cio2->pci_dev->dev);
		return r;
	}

	r = media_pipeline_start(&q->vdev.entity, &q->pipe);
	if (r)
		goto fail_pipeline;

	r = cio2_hw_init(cio2, q);
	if (r)
		goto fail_hw;

	/* Start streaming on sensor */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r)
		goto fail_csi2_subdev;

	cio2->streaming = true;

	return 0;

fail_csi2_subdev:
	cio2_hw_exit(cio2, q);
fail_hw:
	media_pipeline_stop(&q->vdev.entity);
fail_pipeline:
	dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
	pm_runtime_put(&cio2->pci_dev->dev);

	return r;
}
static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);

	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
		dev_err(&cio2->pci_dev->dev,
			"failed to stop sensor streaming\n");

	cio2_hw_exit(cio2, q);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
	media_pipeline_stop(&q->vdev.entity);
	pm_runtime_put(&cio2->pci_dev->dev);
	cio2->streaming = false;
}
static const struct vb2_ops cio2_vb2_ops = {
	.buf_init = cio2_vb2_buf_init,
	.buf_queue = cio2_vb2_buf_queue,
	.buf_cleanup = cio2_vb2_buf_cleanup,
	.queue_setup = cio2_vb2_queue_setup,
	.start_streaming = cio2_vb2_start_streaming,
	.stop_streaming = cio2_vb2_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
/**************** V4L2 interface ****************/

static int cio2_v4l2_querycap(struct file *file, void *fh,
			      struct v4l2_capability *cap)
{
	struct cio2_device *cio2 = video_drvdata(file);

	strlcpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
	strlcpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "PCI:%s", pci_name(cio2->pci_dev));

	return 0;
}
static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
			      struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].fourcc;

	return 0;
}
/* The format is validated in cio2_video_link_validate() */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	f->fmt.pix_mp = q->format;

	return 0;
}
static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	const struct ipu3_cio2_fmt *fmt;
	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;

	fmt = cio2_find_format(&mpix->pixelformat, NULL);
	if (!fmt)
		fmt = &formats[0];

	/* Only supports up to 4224x3136 */
	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
		mpix->width = CIO2_IMAGE_MAX_WIDTH;
	if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
		mpix->height = CIO2_IMAGE_MAX_LENGTH;

	mpix->num_planes = 1;
	mpix->pixelformat = fmt->fourcc;
	mpix->colorspace = V4L2_COLORSPACE_RAW;
	mpix->field = V4L2_FIELD_NONE;
	memset(mpix->reserved, 0, sizeof(mpix->reserved));
	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
							mpix->height;
	memset(mpix->plane_fmt[0].reserved, 0,
	       sizeof(mpix->plane_fmt[0].reserved));

	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;

	return 0;
}
static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	cio2_v4l2_try_fmt(file, fh, f);
	q->format = f->fmt.pix_mp;

	return 0;
}
static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strlcpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}
static const struct v4l2_file_operations cio2_v4l2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
	.vidioc_querycap = cio2_v4l2_querycap,
	.vidioc_enum_fmt_vid_cap_mplane = cio2_v4l2_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_enum_input = cio2_video_enum_input,
	.vidioc_g_input = cio2_video_g_input,
	.vidioc_s_input = cio2_video_s_input,
};
static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
				       struct v4l2_fh *fh,
				       struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_FRAME_SYNC)
		return -EINVAL;

	/* Line number. For now only zero accepted. */
	if (sub->id != 0)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, 0, NULL);
}
static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_mbus_framefmt *format;
	const struct v4l2_mbus_framefmt fmt_default = {
		.code = formats[0].mbus_code,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_RAW,
		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
		.quantization = V4L2_QUANTIZATION_DEFAULT,
		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
	};

	/* Initialize try_fmt */
	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
	*format = fmt_default;

	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
	*format = fmt_default;

	return 0;
}
/*
 * cio2_subdev_get_fmt - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_pad_config *cfg,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
	struct v4l2_subdev_format format;
	int ret;

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
		return 0;
	}

	if (fmt->pad == CIO2_PAD_SINK) {
		format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
				       &format);
		if (ret)
			return ret;
		/* update colorspace etc */
		q->subdev_fmt.colorspace = format.format.colorspace;
		q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
		q->subdev_fmt.quantization = format.format.quantization;
		q->subdev_fmt.xfer_func = format.format.xfer_func;
	}

	fmt->format = q->subdev_fmt;

	return 0;
}
/*
 * cio2_subdev_set_fmt - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_pad_config *cfg,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);

	/*
	 * Only allow setting the sink pad format;
	 * the source always propagates from the sink.
	 */
	if (fmt->pad == CIO2_PAD_SOURCE)
		return cio2_subdev_get_fmt(sd, cfg, fmt);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		*v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
	} else {
		/* It's the sink, allow changing frame size */
		q->subdev_fmt.width = fmt->format.width;
		q->subdev_fmt.height = fmt->format.height;
		q->subdev_fmt.code = fmt->format.code;
		fmt->format = q->subdev_fmt;
	}

	return 0;
}
static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
				      struct v4l2_subdev_pad_config *cfg,
				      struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	code->code = formats[code->index].mbus_code;

	return 0;
}
static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
						struct v4l2_subdev_format *fmt)
{
	if (is_media_entity_v4l2_subdev(pad->entity)) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(pad->entity);

		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
	}

	return -EINVAL;
}
static int cio2_video_link_validate(struct media_link *link)
{
	struct video_device *vd = container_of(link->sink->entity,
					       struct video_device, entity);
	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
	struct cio2_device *cio2 = video_get_drvdata(vd);
	struct v4l2_subdev_format source_fmt;
	int ret;

	if (!media_entity_remote_pad(link->sink->entity->pads)) {
		dev_info(&cio2->pci_dev->dev,
			 "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != q->format.width ||
	    source_fmt.format.height != q->format.height) {
		dev_err(&cio2->pci_dev->dev,
			"Wrong width or height %ux%u (%ux%u expected)\n",
			q->format.width, q->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
		return -EINVAL;

	return 0;
}
static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
	.subscribe_event = cio2_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
	.open = cio2_subdev_open,
};

static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
	.link_validate = v4l2_subdev_link_validate_default,
	.get_fmt = cio2_subdev_get_fmt,
	.set_fmt = cio2_subdev_set_fmt,
	.enum_mbus_code = cio2_subdev_enum_mbus_code,
};

static const struct v4l2_subdev_ops cio2_subdev_ops = {
	.core = &cio2_subdev_core_ops,
	.pad = &cio2_subdev_pad_ops,
};
/******* V4L2 sub-device asynchronous registration callbacks ***********/

struct sensor_async_subdev {
	struct v4l2_async_subdev asd;
	struct csi2_bus_info csi2;
};
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_subdev *asd)
{
	struct cio2_device *cio2 = container_of(notifier,
					struct cio2_device, notifier);
	struct sensor_async_subdev *s_asd = container_of(asd,
					struct sensor_async_subdev, asd);
	struct cio2_queue *q;

	if (cio2->queue[s_asd->csi2.port].sensor)
		return -EBUSY;

	q = &cio2->queue[s_asd->csi2.port];

	q->csi2 = s_asd->csi2;
	q->sensor = sd;
	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);

	return 0;
}
/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_subdev *asd)
{
	struct cio2_device *cio2 = container_of(notifier,
					struct cio2_device, notifier);
	struct sensor_async_subdev *s_asd = container_of(asd,
					struct sensor_async_subdev, asd);

	cio2->queue[s_asd->csi2.port].sensor = NULL;
}
/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
						notifier);
	struct sensor_async_subdev *s_asd;
	struct cio2_queue *q;
	unsigned int i, pad;
	int ret;

	for (i = 0; i < notifier->num_subdevs; i++) {
		s_asd = container_of(cio2->notifier.subdevs[i],
				     struct sensor_async_subdev, asd);
		q = &cio2->queue[s_asd->csi2.port];

		for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
			if (q->sensor->entity.pads[pad].flags &
						MEDIA_PAD_FL_SOURCE)
				break;

		if (pad == q->sensor->entity.num_pads) {
			dev_err(&cio2->pci_dev->dev,
				"failed to find src pad for %s\n",
				q->sensor->name);
			return -ENXIO;
		}

		ret = media_create_pad_link(
				&q->sensor->entity, pad,
				&q->subdev.entity, CIO2_PAD_SINK,
				0);
		if (ret) {
			dev_err(&cio2->pci_dev->dev,
				"failed to create link for %s\n",
				cio2->queue[i].sensor->name);
			return ret;
		}
	}

	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
}
static const struct v4l2_async_notifier_operations cio2_async_ops = {
	.bound = cio2_notifier_bound,
	.unbind = cio2_notifier_unbind,
	.complete = cio2_notifier_complete,
};
static int cio2_fwnode_parse(struct device *dev,
			     struct v4l2_fwnode_endpoint *vep,
			     struct v4l2_async_subdev *asd)
{
	struct sensor_async_subdev *s_asd =
			container_of(asd, struct sensor_async_subdev, asd);

	if (vep->bus_type != V4L2_MBUS_CSI2) {
		dev_err(dev, "Only CSI2 bus type is currently supported\n");
		return -EINVAL;
	}

	s_asd->csi2.port = vep->base.port;
	s_asd->csi2.lanes = vep->bus.mipi_csi2.num_data_lanes;

	return 0;
}
static int cio2_notifier_init(struct cio2_device *cio2)
{
	int ret;

	ret = v4l2_async_notifier_parse_fwnode_endpoints(
		&cio2->pci_dev->dev, &cio2->notifier,
		sizeof(struct sensor_async_subdev),
		cio2_fwnode_parse);
	if (ret < 0)
		return ret;

	if (!cio2->notifier.num_subdevs)
		return -ENODEV;	/* no endpoint */

	cio2->notifier.ops = &cio2_async_ops;
	ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
	if (ret) {
		dev_err(&cio2->pci_dev->dev,
			"failed to register async notifier : %d\n", ret);
		v4l2_async_notifier_cleanup(&cio2->notifier);
	}

	return ret;
}
static void cio2_notifier_exit(struct cio2_device *cio2)
{
	v4l2_async_notifier_unregister(&cio2->notifier);
	v4l2_async_notifier_cleanup(&cio2->notifier);
}
/**************** Queue initialization ****************/

static const struct media_entity_operations cio2_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

static const struct media_entity_operations cio2_video_entity_ops = {
	.link_validate = cio2_video_link_validate,
};
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const u32 default_width = 1936;
	static const u32 default_height = 1096;
	const struct ipu3_cio2_fmt dflt_fmt = formats[0];

	struct video_device *vdev = &q->vdev;
	struct vb2_queue *vbq = &q->vbq;
	struct v4l2_subdev *subdev = &q->subdev;
	struct v4l2_mbus_framefmt *fmt;
	int r;

	/* Initialize miscellaneous variables */
	mutex_init(&q->lock);

	/* Initialize formats to default values */
	fmt = &q->subdev_fmt;
	fmt->width = default_width;
	fmt->height = default_height;
	fmt->code = dflt_fmt.mbus_code;
	fmt->field = V4L2_FIELD_NONE;

	q->format.width = default_width;
	q->format.height = default_height;
	q->format.pixelformat = dflt_fmt.fourcc;
	q->format.colorspace = V4L2_COLORSPACE_RAW;
	q->format.field = V4L2_FIELD_NONE;
	q->format.num_planes = 1;
	q->format.plane_fmt[0].bytesperline =
				cio2_bytesperline(q->format.width);
	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
						q->format.height;

	/* Initialize fbpt */
	r = cio2_fbpt_init(cio2, q);
	if (r)
		goto fail_fbpt;

	/* Initialize media entities */
	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
		MEDIA_PAD_FL_MUST_CONNECT;
	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	subdev->entity.ops = &cio2_media_ops;
	subdev->internal_ops = &cio2_subdev_internal_ops;
	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed initialize subdev media entity (%d)\n", r);
		goto fail_subdev_media_entity;
	}

	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	vdev->entity.ops = &cio2_video_entity_ops;
	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed initialize videodev media entity (%d)\n", r);
		goto fail_vdev_media_entity;
	}

	/* Initialize subdev */
	v4l2_subdev_init(subdev, &cio2_subdev_ops);
	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	subdev->owner = THIS_MODULE;
	snprintf(subdev->name, sizeof(subdev->name),
		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
	v4l2_set_subdevdata(subdev, cio2);
	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed initialize subdev (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vbq */
	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	vbq->ops = &cio2_vb2_ops;
	vbq->mem_ops = &vb2_dma_sg_memops;
	vbq->buf_struct_size = sizeof(struct cio2_buffer);
	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	vbq->min_buffers_needed = 1;
	vbq->drv_priv = cio2;
	vbq->lock = &q->lock;
	r = vb2_queue_init(vbq);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed to initialize videobuf2 queue (%d)\n", r);
		goto fail_vbq;
	}

	/* Initialize vdev */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
	vdev->release = video_device_release_empty;
	vdev->fops = &cio2_v4l2_fops;
	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
	vdev->lock = &cio2->lock;
	vdev->v4l2_dev = &cio2->v4l2_dev;
	vdev->queue = &q->vbq;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, cio2);
	r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed to register video device (%d)\n", r);
		goto fail_vdev;
	}

	/* Create link from CIO2 subdev to output node */
	r = media_create_pad_link(
		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
	if (r)
		goto fail_link;

	return 0;

fail_link:
	video_unregister_device(&q->vdev);
fail_vdev:
	vb2_queue_release(vbq);
fail_vbq:
	v4l2_device_unregister_subdev(subdev);
fail_subdev:
	media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
	media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
fail_fbpt:
	mutex_destroy(&q->lock);

	return r;
}
static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	video_unregister_device(&q->vdev);
	media_entity_cleanup(&q->vdev.entity);
	vb2_queue_release(&q->vbq);
	v4l2_device_unregister_subdev(&q->subdev);
	media_entity_cleanup(&q->subdev.entity);
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
	mutex_destroy(&q->lock);
}
static int cio2_queues_init(struct cio2_device *cio2)
{
	int i, r;

	for (i = 0; i < CIO2_QUEUES; i++) {
		r = cio2_queue_init(cio2, &cio2->queue[i]);
		if (r)
			break;
	}

	if (i == CIO2_QUEUES)
		return 0;

	for (i--; i >= 0; i--)
		cio2_queue_exit(cio2, &cio2->queue[i]);

	return r;
}
static void cio2_queues_exit(struct cio2_device *cio2)
{
	unsigned int i;

	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
}
/**************** PCI interface ****************/

static int cio2_pci_config_setup(struct pci_dev *dev)
{
	u16 pci_command;
	int r = pci_enable_msi(dev);

	if (r) {
		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, pci_command);

	return 0;
}
static int cio2_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct cio2_device *cio2;
	void __iomem *const *iomap;
	int r;

	cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
	if (!cio2)
		return -ENOMEM;
	cio2->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
		return -ENODEV;
	}

	iomap = pcim_iomap_table(pci_dev);
	if (!iomap) {
		dev_err(&pci_dev->dev, "failed to iomap table\n");
		return -ENODEV;
	}

	cio2->base = iomap[CIO2_PCI_BAR];

	pci_set_drvdata(pci_dev, cio2);

	pci_set_master(pci_dev);

	r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
	if (r) {
		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
		return -ENODEV;
	}

	r = cio2_pci_config_setup(pci_dev);
	if (r)
		return -ENODEV;

	r = cio2_fbpt_init_dummy(cio2);
	if (r)
		return r;

	mutex_init(&cio2->lock);

	cio2->media_dev.dev = &cio2->pci_dev->dev;
	strlcpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
		sizeof(cio2->media_dev.model));
	snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
		 "PCI:%s", pci_name(cio2->pci_dev));
	cio2->media_dev.hw_revision = 0;

	media_device_init(&cio2->media_dev);
	r = media_device_register(&cio2->media_dev);
	if (r < 0)
		goto fail_mutex_destroy;

	cio2->v4l2_dev.mdev = &cio2->media_dev;
	r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to register V4L2 device (%d)\n", r);
		goto fail_media_device_unregister;
	}

	r = cio2_queues_init(cio2);
	if (r)
		goto fail_v4l2_device_unregister;

	/* Register notifier for subdevices we care about */
	r = cio2_notifier_init(cio2);
	if (r)
		goto fail_cio2_queue_exit;

	r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
			     IRQF_SHARED, CIO2_NAME, cio2);
	if (r) {
		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
		goto fail;
	}

	pm_runtime_put_noidle(&pci_dev->dev);
	pm_runtime_allow(&pci_dev->dev);

	return 0;

fail:
	cio2_notifier_exit(cio2);
fail_cio2_queue_exit:
	cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
	v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
fail_mutex_destroy:
	mutex_destroy(&cio2->lock);
	cio2_fbpt_exit_dummy(cio2);

	return r;
}
static void cio2_pci_remove(struct pci_dev *pci_dev)
{
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	unsigned int i;

	cio2_notifier_exit(cio2);
	cio2_fbpt_exit_dummy(cio2);
	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
	v4l2_device_unregister(&cio2->v4l2_dev);
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
	mutex_destroy(&cio2->lock);
}
static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime suspend.\n");

	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pm |= CIO2_PMCSR_D3;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}
static int __maybe_unused cio2_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime resume.\n");

	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}
/*
 * Helper function to advance all the elements of a circular buffer by
 * "start" positions.
 */
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{
	struct {
		size_t begin, end;
	} arr[2] = {
		{ 0, start - 1 },
		{ start, elems - 1 },
	};

#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)

	/* Loop as long as we have out-of-place entries */
	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
		size_t size0, i;

		/*
		 * Find the number of entries that can be arranged on this
		 * iteration.
		 */
		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));

		/* Swap the entries in two parts of the array. */
		for (i = 0; i < size0; i++) {
			u8 *d = ptr + elem_size * (arr[1].begin + i);
			u8 *s = ptr + elem_size * (arr[0].begin + i);
			size_t j;

			for (j = 0; j < elem_size; j++)
				swap(d[j], s[j]);
		}

		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
			/* The end of the first array remains unarranged. */
			arr[0].begin += size0;
		} else {
			/*
			 * The first array is fully arranged so we proceed
			 * handling the next one.
			 */
			arr[0].begin = arr[1].begin;
			arr[0].end = arr[1].begin + size0 - 1;
			arr[1].begin += size0;
		}
	}
}
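/*
 * Illustrative example (not part of the original source): for an array of
 * five elements A B C D E and start = 2, arrange() rotates the contents in
 * place to C D E A B, i.e. the element that used to live at index "start"
 * ends up at index 0. cio2_fbpt_rearrange() below relies on this to move
 * the first queued buffer to the head of the FBPT.
 */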
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{
	unsigned int i, j;

	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
	     i++, j = (j + 1) % CIO2_MAX_BUFFERS)
		if (q->bufs[j])
			break;

	if (i == CIO2_MAX_BUFFERS)
		return;

	if (j) {
		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
			CIO2_MAX_BUFFERS, j);
		arrange(q->bufs, sizeof(struct cio2_buffer *),
			CIO2_MAX_BUFFERS, j);
	}

	/*
	 * DMA clears the valid bit when accessing the buffer.
	 * When stopping stream in suspend callback, some of the buffers
	 * may be in invalid state. After resume, when DMA meets the invalid
	 * buffer, it will halt and stop receiving new data.
	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
	 */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}
static int __maybe_unused cio2_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;

	dev_dbg(dev, "cio2 suspend\n");
	if (!cio2->streaming)
		return 0;

	/* Stop stream */
	cio2_hw_exit(cio2, q);

	pm_runtime_force_suspend(dev);

	/*
	 * Upon resume, hw starts to process the fbpt entries from beginning,
	 * so relocate the queued buffers to the fbpt head before suspend.
	 */
	cio2_fbpt_rearrange(cio2, q);

	return 0;
}
static int __maybe_unused cio2_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;
	int r;

	dev_dbg(dev, "cio2 resume\n");
	if (!cio2->streaming)
		return 0;

	/* Start stream */
	r = pm_runtime_force_resume(&cio2->pci_dev->dev);
	if (r < 0) {
		dev_err(&cio2->pci_dev->dev,
			"failed to set power %d\n", r);
		return r;
	}

	r = cio2_hw_init(cio2, q);
	if (r)
		dev_err(dev, "fail to init cio2 hw\n");

	return r;
}
static const struct dev_pm_ops cio2_pm_ops = {
	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
};
static const struct pci_device_id cio2_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
static struct pci_driver cio2_pci_driver = {
	.name = CIO2_NAME,
	.id_table = cio2_pci_id_table,
	.probe = cio2_pci_probe,
	.remove = cio2_pci_remove,
	.driver = {
		.pm = &cio2_pm_ops,
	},
};

module_pci_driver(cio2_pci_driver);
MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");