drivers/media/platform/ti-vpe/cal.c
1 /*
2 * TI CAL camera interface driver
4 * Copyright (c) 2015 Texas Instruments Inc.
5 * Benoit Parrot, <bparrot@ti.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/ioctl.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/delay.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <linux/videodev2.h>
21 #include <linux/of_device.h>
22 #include <linux/of_graph.h>
24 #include <media/v4l2-fwnode.h>
25 #include <media/v4l2-async.h>
26 #include <media/v4l2-common.h>
27 #include <media/v4l2-ctrls.h>
28 #include <media/v4l2-device.h>
29 #include <media/v4l2-event.h>
30 #include <media/v4l2-ioctl.h>
31 #include <media/v4l2-fh.h>
32 #include <media/videobuf2-core.h>
33 #include <media/videobuf2-dma-contig.h>
34 #include "cal_regs.h"
36 #define CAL_MODULE_NAME "cal"
38 #define MAX_WIDTH 1920
39 #define MAX_HEIGHT 1200
41 #define CAL_VERSION "0.1.0"
43 MODULE_DESCRIPTION("TI CAL driver");
44 MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
45 MODULE_LICENSE("GPL v2");
46 MODULE_VERSION(CAL_VERSION);
48 static unsigned video_nr = -1;
49 module_param(video_nr, uint, 0644);
50 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
52 static unsigned debug;
53 module_param(debug, uint, 0644);
54 MODULE_PARM_DESC(debug, "activates debug info");
56 /* timeperframe: default */
57 static const struct v4l2_fract
58 tpf_default = {.numerator = 1001, .denominator = 30000};
60 #define cal_dbg(level, caldev, fmt, arg...) \
61 v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg)
62 #define cal_info(caldev, fmt, arg...) \
63 v4l2_info(&caldev->v4l2_dev, fmt, ##arg)
64 #define cal_err(caldev, fmt, arg...) \
65 v4l2_err(&caldev->v4l2_dev, fmt, ##arg)
67 #define ctx_dbg(level, ctx, fmt, arg...) \
68 v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg)
69 #define ctx_info(ctx, fmt, arg...) \
70 v4l2_info(&ctx->v4l2_dev, fmt, ##arg)
71 #define ctx_err(ctx, fmt, arg...) \
72 v4l2_err(&ctx->v4l2_dev, fmt, ##arg)
74 #define CAL_NUM_INPUT 1
75 #define CAL_NUM_CONTEXT 2
77 #define bytes_per_line(pixel, bpp) (ALIGN((pixel) * (bpp), 16))
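/*
 * For example, bytes_per_line(640, 2) is ALIGN(1280, 16) = 1280, while
 * bytes_per_line(641, 2) is ALIGN(1282, 16) = 1296; bpp here is bytes
 * per pixel (fmt->depth >> 3 at the call sites).
 */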
79 #define reg_read(dev, offset) ioread32(dev->base + offset)
80 #define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
82 #define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \
83 mask)
84 #define reg_write_field(dev, offset, field, mask) { \
85 u32 val = reg_read(dev, offset); \
86 set_field(&val, field, mask); \
87 reg_write(dev, offset, val); }
89 /* ------------------------------------------------------------------
90 * Basic structures
91 * ------------------------------------------------------------------
94 struct cal_fmt {
95 u32 fourcc;
96 u32 code;
97 u8 depth;
100 static struct cal_fmt cal_formats[] = {
102 .fourcc = V4L2_PIX_FMT_YUYV,
103 .code = MEDIA_BUS_FMT_YUYV8_2X8,
104 .depth = 16,
105 }, {
106 .fourcc = V4L2_PIX_FMT_UYVY,
107 .code = MEDIA_BUS_FMT_UYVY8_2X8,
108 .depth = 16,
109 }, {
110 .fourcc = V4L2_PIX_FMT_YVYU,
111 .code = MEDIA_BUS_FMT_YVYU8_2X8,
112 .depth = 16,
113 }, {
114 .fourcc = V4L2_PIX_FMT_VYUY,
115 .code = MEDIA_BUS_FMT_VYUY8_2X8,
116 .depth = 16,
117 }, {
118 .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
119 .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
120 .depth = 16,
121 }, {
122 .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
123 .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
124 .depth = 16,
125 }, {
126 .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
127 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
128 .depth = 16,
129 }, {
130 .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
131 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
132 .depth = 16,
133 }, {
134 .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
135 .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
136 .depth = 24,
137 }, {
138 .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
139 .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
140 .depth = 24,
141 }, {
142 .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
143 .code = MEDIA_BUS_FMT_ARGB8888_1X32,
144 .depth = 32,
145 }, {
146 .fourcc = V4L2_PIX_FMT_SBGGR8,
147 .code = MEDIA_BUS_FMT_SBGGR8_1X8,
148 .depth = 8,
149 }, {
150 .fourcc = V4L2_PIX_FMT_SGBRG8,
151 .code = MEDIA_BUS_FMT_SGBRG8_1X8,
152 .depth = 8,
153 }, {
154 .fourcc = V4L2_PIX_FMT_SGRBG8,
155 .code = MEDIA_BUS_FMT_SGRBG8_1X8,
156 .depth = 8,
157 }, {
158 .fourcc = V4L2_PIX_FMT_SRGGB8,
159 .code = MEDIA_BUS_FMT_SRGGB8_1X8,
160 .depth = 8,
161 }, {
162 .fourcc = V4L2_PIX_FMT_SBGGR10,
163 .code = MEDIA_BUS_FMT_SBGGR10_1X10,
164 .depth = 16,
165 }, {
166 .fourcc = V4L2_PIX_FMT_SGBRG10,
167 .code = MEDIA_BUS_FMT_SGBRG10_1X10,
168 .depth = 16,
169 }, {
170 .fourcc = V4L2_PIX_FMT_SGRBG10,
171 .code = MEDIA_BUS_FMT_SGRBG10_1X10,
172 .depth = 16,
173 }, {
174 .fourcc = V4L2_PIX_FMT_SRGGB10,
175 .code = MEDIA_BUS_FMT_SRGGB10_1X10,
176 .depth = 16,
177 }, {
178 .fourcc = V4L2_PIX_FMT_SBGGR12,
179 .code = MEDIA_BUS_FMT_SBGGR12_1X12,
180 .depth = 16,
181 }, {
182 .fourcc = V4L2_PIX_FMT_SGBRG12,
183 .code = MEDIA_BUS_FMT_SGBRG12_1X12,
184 .depth = 16,
185 }, {
186 .fourcc = V4L2_PIX_FMT_SGRBG12,
187 .code = MEDIA_BUS_FMT_SGRBG12_1X12,
188 .depth = 16,
189 }, {
190 .fourcc = V4L2_PIX_FMT_SRGGB12,
191 .code = MEDIA_BUS_FMT_SRGGB12_1X12,
192 .depth = 16,
196 /* Print Four-character-code (FOURCC) */
197 static char *fourcc_to_str(u32 fmt)
199 static char code[5];
201 code[0] = (unsigned char)(fmt & 0xff);
202 code[1] = (unsigned char)((fmt >> 8) & 0xff);
203 code[2] = (unsigned char)((fmt >> 16) & 0xff);
204 code[3] = (unsigned char)((fmt >> 24) & 0xff);
205 code[4] = '\0';
207 return code;
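/*
 * Note: fourcc_to_str() returns a static buffer, so it is not
 * reentrant; it is only used for debug prints here. For example,
 * fourcc_to_str(V4L2_PIX_FMT_YUYV) yields "YUYV".
 */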
210 /* buffer for one video frame */
211 struct cal_buffer {
212 /* common v4l buffer stuff -- must be first */
213 struct vb2_v4l2_buffer vb;
214 struct list_head list;
215 const struct cal_fmt *fmt;
218 struct cal_dmaqueue {
219 struct list_head active;
221 /* Counters to control fps rate */
222 int frame;
223 int ini_jiffies;
226 struct cm_data {
227 void __iomem *base;
228 struct resource *res;
230 unsigned int camerrx_control;
232 struct platform_device *pdev;
235 struct cc_data {
236 void __iomem *base;
237 struct resource *res;
239 struct platform_device *pdev;
243 * There is one cal_dev structure in the driver; it is shared by
244 * all instances.
246 struct cal_dev {
247 int irq;
248 void __iomem *base;
249 struct resource *res;
250 struct platform_device *pdev;
251 struct v4l2_device v4l2_dev;
253 /* Control Module handle */
254 struct cm_data *cm;
255 /* Camera Core Module handle */
256 struct cc_data *cc[CAL_NUM_CSI2_PORTS];
258 struct cal_ctx *ctx[CAL_NUM_CONTEXT];
262 * There is one cal_ctx structure for each camera core context.
264 struct cal_ctx {
265 struct v4l2_device v4l2_dev;
266 struct v4l2_ctrl_handler ctrl_handler;
267 struct video_device vdev;
268 struct v4l2_async_notifier notifier;
269 struct v4l2_subdev *sensor;
270 struct v4l2_fwnode_endpoint endpoint;
272 struct v4l2_async_subdev asd;
273 struct v4l2_async_subdev *asd_list[1];
275 struct v4l2_fh fh;
276 struct cal_dev *dev;
277 struct cc_data *cc;
279 /* v4l2_ioctl mutex */
280 struct mutex mutex;
281 /* v4l2 buffers lock */
282 spinlock_t slock;
284 /* Several counters */
285 unsigned long jiffies;
287 struct cal_dmaqueue vidq;
289 /* Input Number */
290 int input;
292 /* video capture */
293 const struct cal_fmt *fmt;
294 /* Used to store current pixel format */
295 struct v4l2_format v_fmt;
296 /* Used to store current mbus frame format */
297 struct v4l2_mbus_framefmt m_fmt;
299 /* Current subdev enumerated format */
300 struct cal_fmt *active_fmt[ARRAY_SIZE(cal_formats)];
301 int num_active_fmt;
303 struct v4l2_fract timeperframe;
304 unsigned int sequence;
305 unsigned int external_rate;
306 struct vb2_queue vb_vidq;
307 unsigned int seq_count;
308 unsigned int csi2_port;
309 unsigned int virtual_channel;
311 /* Pointer to the current v4l2_buffer */
312 struct cal_buffer *cur_frm;
313 /* Pointer to the next v4l2_buffer */
314 struct cal_buffer *next_frm;
317 static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
318 u32 pixelformat)
320 const struct cal_fmt *fmt;
321 unsigned int k;
323 for (k = 0; k < ctx->num_active_fmt; k++) {
324 fmt = ctx->active_fmt[k];
325 if (fmt->fourcc == pixelformat)
326 return fmt;
329 return NULL;
332 static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
333 u32 code)
335 const struct cal_fmt *fmt;
336 unsigned int k;
338 for (k = 0; k < ctx->num_active_fmt; k++) {
339 fmt = ctx->active_fmt[k];
340 if (fmt->code == code)
341 return fmt;
344 return NULL;
347 static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n)
349 return container_of(n, struct cal_ctx, notifier);
352 static inline int get_field(u32 value, u32 mask)
354 return (value & mask) >> __ffs(mask);
357 static inline void set_field(u32 *valp, u32 field, u32 mask)
359 u32 val = *valp;
361 val &= ~mask;
362 val |= (field << __ffs(mask)) & mask;
363 *valp = val;
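/*
 * Illustrative example: with mask 0x0000ff00, __ffs(mask) is 8, so
 * get_field(0x1234abcd, 0x0000ff00) returns 0xab, and
 * set_field(&val, 0x5a, 0x0000ff00) turns 0x1234abcd into 0x12345acd.
 */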
367 * Control Module block access
369 static struct cm_data *cm_create(struct cal_dev *dev)
371 struct platform_device *pdev = dev->pdev;
372 struct cm_data *cm;
374 cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
375 if (!cm)
376 return ERR_PTR(-ENOMEM);
378 cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
379 "camerrx_control");
380 cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
381 if (IS_ERR(cm->base)) {
382 cal_err(dev, "failed to ioremap\n");
383 return ERR_CAST(cm->base);
386 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
387 cm->res->name, &cm->res->start, &cm->res->end);
389 return cm;
392 static void camerarx_phy_enable(struct cal_ctx *ctx)
394 u32 val;
396 if (!ctx->dev->cm->base) {
397 ctx_err(ctx, "cm not mapped\n");
398 return;
401 val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
402 if (ctx->csi2_port == 1) {
403 set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
404 set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
405 /* enable all lanes by default */
406 set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
407 set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
408 } else if (ctx->csi2_port == 2) {
409 set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
410 set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
411 /* enable all lanes by default */
412 set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
413 set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
415 reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
418 static void camerarx_phy_disable(struct cal_ctx *ctx)
420 u32 val;
422 if (!ctx->dev->cm->base) {
423 ctx_err(ctx, "cm not mapped\n");
424 return;
427 val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
428 if (ctx->csi2_port == 1)
429 set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
430 else if (ctx->csi2_port == 2)
431 set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
432 reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
436 * Camera Instance access block
438 static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core)
440 struct platform_device *pdev = dev->pdev;
441 struct cc_data *cc;
443 cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL);
444 if (!cc)
445 return ERR_PTR(-ENOMEM);
447 cc->res = platform_get_resource_byname(pdev,
448 IORESOURCE_MEM,
449 (core == 0) ?
450 "cal_rx_core0" :
451 "cal_rx_core1");
452 cc->base = devm_ioremap_resource(&pdev->dev, cc->res);
453 if (IS_ERR(cc->base)) {
454 cal_err(dev, "failed to ioremap\n");
455 return ERR_CAST(cc->base);
458 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
459 cc->res->name, &cc->res->start, &cc->res->end);
461 return cc;
465 * Get Revision and HW info
467 static void cal_get_hwinfo(struct cal_dev *dev)
469 u32 revision = 0;
470 u32 hwinfo = 0;
472 revision = reg_read(dev, CAL_HL_REVISION);
473 cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n",
474 revision);
476 hwinfo = reg_read(dev, CAL_HL_HWINFO);
477 cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n",
478 hwinfo);
481 static inline int cal_runtime_get(struct cal_dev *dev)
483 return pm_runtime_get_sync(&dev->pdev->dev);
486 static inline void cal_runtime_put(struct cal_dev *dev)
488 pm_runtime_put_sync(&dev->pdev->dev);
491 static void cal_quickdump_regs(struct cal_dev *dev)
493 cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
494 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
495 (__force const void *)dev->base,
496 resource_size(dev->res), false);
498 if (dev->ctx[0]) {
499 cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n",
500 &dev->ctx[0]->cc->res->start);
501 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
502 (__force const void *)dev->ctx[0]->cc->base,
503 resource_size(dev->ctx[0]->cc->res),
504 false);
507 if (dev->ctx[1]) {
508 cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n",
509 &dev->ctx[1]->cc->res->start);
510 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
511 (__force const void *)dev->ctx[1]->cc->base,
512 resource_size(dev->ctx[1]->cc->res),
513 false);
516 cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
517 &dev->cm->res->start);
518 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
519 (__force const void *)dev->cm->base,
520 resource_size(dev->cm->res), false);
524 * Enable the expected IRQ sources
526 static void enable_irqs(struct cal_ctx *ctx)
528 /* Enable IRQ_WDMA_END 0/1 */
529 reg_write_field(ctx->dev,
530 CAL_HL_IRQENABLE_SET(2),
531 CAL_HL_IRQ_ENABLE,
532 CAL_HL_IRQ_MASK(ctx->csi2_port));
533 /* Enable IRQ_WDMA_START 0/1 */
534 reg_write_field(ctx->dev,
535 CAL_HL_IRQENABLE_SET(3),
536 CAL_HL_IRQ_ENABLE,
537 CAL_HL_IRQ_MASK(ctx->csi2_port));
538 /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
539 reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
542 static void disable_irqs(struct cal_ctx *ctx)
544 /* Disable IRQ_WDMA_END 0/1 */
545 reg_write_field(ctx->dev,
546 CAL_HL_IRQENABLE_CLR(2),
547 CAL_HL_IRQ_CLEAR,
548 CAL_HL_IRQ_MASK(ctx->csi2_port));
549 /* Disable IRQ_WDMA_START 0/1 */
550 reg_write_field(ctx->dev,
551 CAL_HL_IRQENABLE_CLR(3),
552 CAL_HL_IRQ_CLEAR,
553 CAL_HL_IRQ_MASK(ctx->csi2_port));
554 /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
555 reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
558 static void csi2_init(struct cal_ctx *ctx)
560 int i;
561 u32 val;
563 val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
564 set_field(&val, CAL_GEN_ENABLE,
565 CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
566 set_field(&val, CAL_GEN_ENABLE,
567 CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
568 set_field(&val, CAL_GEN_DISABLE,
569 CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
570 set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
571 reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
572 ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
573 reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
575 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
576 set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
577 CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
578 set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
579 CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
580 reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
581 for (i = 0; i < 10; i++) {
582 if (reg_read_field(ctx->dev,
583 CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
584 CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
585 CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
586 break;
587 usleep_range(1000, 1100);
589 ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
590 reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
592 val = reg_read(ctx->dev, CAL_CTRL);
593 set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
594 set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
595 set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
596 CAL_CTRL_POSTED_WRITES_MASK);
597 set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
598 set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
599 reg_write(ctx->dev, CAL_CTRL, val);
600 ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
603 static void csi2_lane_config(struct cal_ctx *ctx)
605 u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
606 u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
607 u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
608 struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
609 &ctx->endpoint.bus.mipi_csi2;
610 int lane;
612 set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
613 set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
614 for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
616 * Each lane is one nibble apart, starting with the
617 * clock lane and followed by the data lanes, so shift the masks by 4.
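 * For example, with DT properties clock-lanes = <0> and
 * data-lanes = <1 2> (illustrative values), the position fields are
 * programmed as 1, 2 and 3 (each value is the DT lane index plus one).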
619 lane_mask <<= 4;
620 polarity_mask <<= 4;
621 set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
622 set_field(&val, mipi_csi2->lane_polarities[lane + 1],
623 polarity_mask);
626 reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
627 ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
628 ctx->csi2_port, val);
631 static void csi2_ppi_enable(struct cal_ctx *ctx)
633 reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
634 CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
637 static void csi2_ppi_disable(struct cal_ctx *ctx)
639 reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
640 CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
643 static void csi2_ctx_config(struct cal_ctx *ctx)
645 u32 val;
647 val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port));
648 set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK);
650 * DT type: MIPI CSI-2 Specs
651 * 0x1: All - DT filter is disabled
652 * 0x24: RGB888 1 pixel = 3 bytes
653 * 0x2B: RAW10 4 pixels = 5 bytes
654 * 0x2A: RAW8 1 pixel = 1 byte
655 * 0x1E: YUV422 2 pixels = 4 bytes
657 set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
658 /* Virtual Channel from the CSI2 sensor, usually 0 */
659 set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
660 /* NUM_LINES_PER_FRAME => 0 means auto detect */
661 set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
662 set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
663 set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
664 CAL_CSI2_CTX_PACK_MODE_MASK);
665 reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val);
666 ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port,
667 reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)));
670 static void pix_proc_config(struct cal_ctx *ctx)
672 u32 val;
674 val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
675 set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
676 set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
677 set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
678 set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
679 set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
680 set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
681 reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
682 ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
683 reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
686 static void cal_wr_dma_config(struct cal_ctx *ctx,
687 unsigned int width)
689 u32 val;
691 val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
692 set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
693 set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
694 CAL_WR_DMA_CTRL_DTAG_MASK);
695 set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
696 CAL_WR_DMA_CTRL_MODE_MASK);
697 set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
698 CAL_WR_DMA_CTRL_PATTERN_MASK);
699 set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
700 reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
701 ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
702 reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));
705 * width/16 is not clearly documented, but it works;
706 * a value of zero does not work.
708 reg_write_field(ctx->dev,
709 CAL_WR_DMA_OFST(ctx->csi2_port),
710 (width / 16),
711 CAL_WR_DMA_OFST_MASK);
712 ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port,
713 reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port)));
715 val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port));
716 /* 64 bit word means no skipping */
717 set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
719 * (width * 8) / 64 should be the size of an entire line
720 * in 64-bit words, but 0 means all data until the end
721 * is detected automatically.
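 * For example, a 1280-byte line is (1280 * 8) / 64 = 160 64-bit words,
 * which is the same value as the width / 8 programmed below.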
723 set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
724 reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
725 ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
726 reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
729 static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
731 reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr);
735 * TCLK values are OK at their reset values
737 #define TCLK_TERM 0
738 #define TCLK_MISS 1
739 #define TCLK_SETTLE 14
740 #define THS_SETTLE 15
742 static void csi2_phy_config(struct cal_ctx *ctx)
744 unsigned int reg0, reg1;
745 unsigned int ths_term, ths_settle;
746 unsigned int ddrclkperiod_us;
749 * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2.
751 ddrclkperiod_us = ctx->external_rate / 2000000;
752 ddrclkperiod_us = 1000000 / ddrclkperiod_us;
753 ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
755 ths_term = 20000 / ddrclkperiod_us;
756 ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
757 ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
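/*
 * Illustrative example: with external_rate = 192000000 (the initial
 * default set in cal_complete_ctx()), ddrclkperiod_us works out to
 * 192000000 / 2000000 = 96 and then 1000000 / 96 = 10416, so
 * ths_term = 20000 / 10416 = 1, which is below 2 and is therefore
 * programmed as-is.
 */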
760 * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1.
761 * Since CtrlClk is fixed at 96 MHz, we get
762 * ths_settle = floor(176.3 / 10.416) - 1 = 15
763 * If we ever switch to a dynamic clock then this code might be useful
765 * unsigned int ctrlclkperiod_us;
766 * ctrlclkperiod_us = 96000000 / 1000000;
767 * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us;
768 * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us);
770 * ths_settle = 176300 / ctrlclkperiod_us;
771 * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle;
774 ths_settle = THS_SETTLE;
775 ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
777 reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
778 set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
779 CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
780 set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
781 set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);
783 ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0);
784 reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0);
786 reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1);
787 set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
788 set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
789 set_field(&reg1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
790 set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);
792 ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1);
793 reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1);
796 static int cal_get_external_info(struct cal_ctx *ctx)
798 struct v4l2_ctrl *ctrl;
800 if (!ctx->sensor)
801 return -ENODEV;
803 ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
804 if (!ctrl) {
805 ctx_err(ctx, "no pixel rate control in subdev: %s\n",
806 ctx->sensor->name);
807 return -EPIPE;
810 ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
811 ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate);
813 return 0;
816 static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
818 struct cal_dmaqueue *dma_q = &ctx->vidq;
819 struct cal_buffer *buf;
820 unsigned long addr;
822 buf = list_entry(dma_q->active.next, struct cal_buffer, list);
823 ctx->next_frm = buf;
824 list_del(&buf->list);
826 addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
827 cal_wr_dma_addr(ctx, addr);
830 static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
832 ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
833 ctx->cur_frm->vb.field = ctx->m_fmt.field;
834 ctx->cur_frm->vb.sequence = ctx->sequence++;
836 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
837 ctx->cur_frm = ctx->next_frm;
840 #define isvcirqset(irq, vc, ff) (irq & \
841 (CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK))
843 #define isportirqset(irq, port) (irq & CAL_HL_IRQ_MASK(port))
845 static irqreturn_t cal_irq(int irq_cal, void *data)
847 struct cal_dev *dev = (struct cal_dev *)data;
848 struct cal_ctx *ctx;
849 struct cal_dmaqueue *dma_q;
850 u32 irqst2, irqst3;
852 /* Check which DMA just finished */
853 irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
854 if (irqst2) {
855 /* Clear Interrupt status */
856 reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);
858 /* Need to check both ports */
859 if (isportirqset(irqst2, 1)) {
860 ctx = dev->ctx[0];
862 if (ctx->cur_frm != ctx->next_frm)
863 cal_process_buffer_complete(ctx);
866 if (isportirqset(irqst2, 2)) {
867 ctx = dev->ctx[1];
869 if (ctx->cur_frm != ctx->next_frm)
870 cal_process_buffer_complete(ctx);
874 /* Check which DMA just started */
875 irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
876 if (irqst3) {
877 /* Clear Interrupt status */
878 reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);
880 /* Need to check both ports */
881 if (isportirqset(irqst3, 1)) {
882 ctx = dev->ctx[0];
883 dma_q = &ctx->vidq;
885 spin_lock(&ctx->slock);
886 if (!list_empty(&dma_q->active) &&
887 ctx->cur_frm == ctx->next_frm)
888 cal_schedule_next_buffer(ctx);
889 spin_unlock(&ctx->slock);
892 if (isportirqset(irqst3, 2)) {
893 ctx = dev->ctx[1];
894 dma_q = &ctx->vidq;
896 spin_lock(&ctx->slock);
897 if (!list_empty(&dma_q->active) &&
898 ctx->cur_frm == ctx->next_frm)
899 cal_schedule_next_buffer(ctx);
900 spin_unlock(&ctx->slock);
904 return IRQ_HANDLED;
908 * video ioctls
910 static int cal_querycap(struct file *file, void *priv,
911 struct v4l2_capability *cap)
913 struct cal_ctx *ctx = video_drvdata(file);
915 strlcpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
916 strlcpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
918 snprintf(cap->bus_info, sizeof(cap->bus_info),
919 "platform:%s", ctx->v4l2_dev.name);
920 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
921 V4L2_CAP_READWRITE;
922 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
923 return 0;
926 static int cal_enum_fmt_vid_cap(struct file *file, void *priv,
927 struct v4l2_fmtdesc *f)
929 struct cal_ctx *ctx = video_drvdata(file);
930 const struct cal_fmt *fmt = NULL;
932 if (f->index >= ctx->num_active_fmt)
933 return -EINVAL;
935 fmt = ctx->active_fmt[f->index];
937 f->pixelformat = fmt->fourcc;
938 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
939 return 0;
942 static int __subdev_get_format(struct cal_ctx *ctx,
943 struct v4l2_mbus_framefmt *fmt)
945 struct v4l2_subdev_format sd_fmt;
946 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
947 int ret;
949 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
950 sd_fmt.pad = 0;
952 ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt);
953 if (ret)
954 return ret;
956 *fmt = *mbus_fmt;
958 ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
959 fmt->width, fmt->height, fmt->code);
961 return 0;
964 static int __subdev_set_format(struct cal_ctx *ctx,
965 struct v4l2_mbus_framefmt *fmt)
967 struct v4l2_subdev_format sd_fmt;
968 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
969 int ret;
971 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
972 sd_fmt.pad = 0;
973 *mbus_fmt = *fmt;
975 ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt);
976 if (ret)
977 return ret;
979 ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
980 fmt->width, fmt->height, fmt->code);
982 return 0;
985 static int cal_calc_format_size(struct cal_ctx *ctx,
986 const struct cal_fmt *fmt,
987 struct v4l2_format *f)
989 if (!fmt) {
990 ctx_dbg(3, ctx, "No cal_fmt provided!\n");
991 return -EINVAL;
994 v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
995 &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
996 f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
997 fmt->depth >> 3);
998 f->fmt.pix.sizeimage = f->fmt.pix.height *
999 f->fmt.pix.bytesperline;
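/*
 * Example (illustrative): a 640x480 YUYV capture (depth 16) gives
 * bytesperline = bytes_per_line(640, 2) = 1280 and
 * sizeimage = 480 * 1280 = 614400 bytes.
 */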
1001 ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
1002 __func__, fourcc_to_str(f->fmt.pix.pixelformat),
1003 f->fmt.pix.width, f->fmt.pix.height,
1004 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
1006 return 0;
1009 static int cal_g_fmt_vid_cap(struct file *file, void *priv,
1010 struct v4l2_format *f)
1012 struct cal_ctx *ctx = video_drvdata(file);
1014 *f = ctx->v_fmt;
1016 return 0;
1019 static int cal_try_fmt_vid_cap(struct file *file, void *priv,
1020 struct v4l2_format *f)
1022 struct cal_ctx *ctx = video_drvdata(file);
1023 const struct cal_fmt *fmt;
1024 struct v4l2_subdev_frame_size_enum fse;
1025 int ret, found;
1027 fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
1028 if (!fmt) {
1029 ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
1030 f->fmt.pix.pixelformat);
1032 /* Just get the first one enumerated */
1033 fmt = ctx->active_fmt[0];
1034 f->fmt.pix.pixelformat = fmt->fourcc;
1037 f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
1039 /* check for/find a valid width/height */
1040 ret = 0;
1041 found = false;
1042 fse.pad = 0;
1043 fse.code = fmt->code;
1044 fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1045 for (fse.index = 0; ; fse.index++) {
1046 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
1047 NULL, &fse);
1048 if (ret)
1049 break;
1051 if ((f->fmt.pix.width == fse.max_width) &&
1052 (f->fmt.pix.height == fse.max_height)) {
1053 found = true;
1054 break;
1055 } else if ((f->fmt.pix.width >= fse.min_width) &&
1056 (f->fmt.pix.width <= fse.max_width) &&
1057 (f->fmt.pix.height >= fse.min_height) &&
1058 (f->fmt.pix.height <= fse.max_height)) {
1059 found = true;
1060 break;
1064 if (!found) {
1065 /* use existing values as default */
1066 f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
1067 f->fmt.pix.height = ctx->v_fmt.fmt.pix.height;
1071 * Use the current colorspace for now; it will get
1072 * updated properly during s_fmt
1074 f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
1075 return cal_calc_format_size(ctx, fmt, f);
1078 static int cal_s_fmt_vid_cap(struct file *file, void *priv,
1079 struct v4l2_format *f)
1081 struct cal_ctx *ctx = video_drvdata(file);
1082 struct vb2_queue *q = &ctx->vb_vidq;
1083 const struct cal_fmt *fmt;
1084 struct v4l2_mbus_framefmt mbus_fmt;
1085 int ret;
1087 if (vb2_is_busy(q)) {
1088 ctx_dbg(3, ctx, "%s device busy\n", __func__);
1089 return -EBUSY;
1092 ret = cal_try_fmt_vid_cap(file, priv, f);
1093 if (ret < 0)
1094 return ret;
1096 fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
1098 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
1100 ret = __subdev_set_format(ctx, &mbus_fmt);
1101 if (ret)
1102 return ret;
1104 /* Just double check nothing has gone wrong */
1105 if (mbus_fmt.code != fmt->code) {
1106 ctx_dbg(3, ctx,
1107 "%s subdev changed format on us, this should not happen\n",
1108 __func__);
1109 return -EINVAL;
1112 v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
1113 ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1114 ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
1115 cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
1116 ctx->fmt = fmt;
1117 ctx->m_fmt = mbus_fmt;
1118 *f = ctx->v_fmt;
1120 return 0;
1123 static int cal_enum_framesizes(struct file *file, void *fh,
1124 struct v4l2_frmsizeenum *fsize)
1126 struct cal_ctx *ctx = video_drvdata(file);
1127 const struct cal_fmt *fmt;
1128 struct v4l2_subdev_frame_size_enum fse;
1129 int ret;
1131 /* check for valid format */
1132 fmt = find_format_by_pix(ctx, fsize->pixel_format);
1133 if (!fmt) {
1134 ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
1135 fsize->pixel_format);
1136 return -EINVAL;
1139 fse.index = fsize->index;
1140 fse.pad = 0;
1141 fse.code = fmt->code;
1143 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
1144 if (ret)
1145 return ret;
1147 ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
1148 __func__, fse.index, fse.code, fse.min_width, fse.max_width,
1149 fse.min_height, fse.max_height);
1151 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1152 fsize->discrete.width = fse.max_width;
1153 fsize->discrete.height = fse.max_height;
1155 return 0;
1158 static int cal_enum_input(struct file *file, void *priv,
1159 struct v4l2_input *inp)
1161 if (inp->index >= CAL_NUM_INPUT)
1162 return -EINVAL;
1164 inp->type = V4L2_INPUT_TYPE_CAMERA;
1165 sprintf(inp->name, "Camera %u", inp->index);
1166 return 0;
1169 static int cal_g_input(struct file *file, void *priv, unsigned int *i)
1171 struct cal_ctx *ctx = video_drvdata(file);
1173 *i = ctx->input;
1174 return 0;
1177 static int cal_s_input(struct file *file, void *priv, unsigned int i)
1179 struct cal_ctx *ctx = video_drvdata(file);
1181 if (i >= CAL_NUM_INPUT)
1182 return -EINVAL;
1184 ctx->input = i;
1185 return 0;
1188 /* timeperframe is arbitrary and continuous */
1189 static int cal_enum_frameintervals(struct file *file, void *priv,
1190 struct v4l2_frmivalenum *fival)
1192 struct cal_ctx *ctx = video_drvdata(file);
1193 const struct cal_fmt *fmt;
1194 struct v4l2_subdev_frame_interval_enum fie = {
1195 .index = fival->index,
1196 .width = fival->width,
1197 .height = fival->height,
1198 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1200 int ret;
1202 fmt = find_format_by_pix(ctx, fival->pixel_format);
1203 if (!fmt)
1204 return -EINVAL;
1206 fie.code = fmt->code;
1207 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_interval,
1208 NULL, &fie);
1209 if (ret)
1210 return ret;
1211 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1212 fival->discrete = fie.interval;
1214 return 0;
1218 * Videobuf operations
1220 static int cal_queue_setup(struct vb2_queue *vq,
1221 unsigned int *nbuffers, unsigned int *nplanes,
1222 unsigned int sizes[], struct device *alloc_devs[])
1224 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1225 unsigned size = ctx->v_fmt.fmt.pix.sizeimage;
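/*
 * Ensure that at least 3 buffers are allocated in total, matching the
 * min_buffers_needed value set when the queue is initialized.
 */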
1227 if (vq->num_buffers + *nbuffers < 3)
1228 *nbuffers = 3 - vq->num_buffers;
1230 if (*nplanes) {
1231 if (sizes[0] < size)
1232 return -EINVAL;
1233 size = sizes[0];
1236 *nplanes = 1;
1237 sizes[0] = size;
1239 ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
1241 return 0;
1244 static int cal_buffer_prepare(struct vb2_buffer *vb)
1246 struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1247 struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1248 vb.vb2_buf);
1249 unsigned long size;
1251 if (WARN_ON(!ctx->fmt))
1252 return -EINVAL;
1254 size = ctx->v_fmt.fmt.pix.sizeimage;
1255 if (vb2_plane_size(vb, 0) < size) {
1256 ctx_err(ctx,
1257 "data will not fit into plane (%lu < %lu)\n",
1258 vb2_plane_size(vb, 0), size);
1259 return -EINVAL;
1262 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1263 return 0;
1266 static void cal_buffer_queue(struct vb2_buffer *vb)
1268 struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1269 struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1270 vb.vb2_buf);
1271 struct cal_dmaqueue *vidq = &ctx->vidq;
1272 unsigned long flags = 0;
1274 /* recheck locking */
1275 spin_lock_irqsave(&ctx->slock, flags);
1276 list_add_tail(&buf->list, &vidq->active);
1277 spin_unlock_irqrestore(&ctx->slock, flags);
1280 static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
1282 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1283 struct cal_dmaqueue *dma_q = &ctx->vidq;
1284 struct cal_buffer *buf, *tmp;
1285 unsigned long addr = 0;
1286 unsigned long flags;
1287 int ret;
1289 spin_lock_irqsave(&ctx->slock, flags);
1290 if (list_empty(&dma_q->active)) {
1291 spin_unlock_irqrestore(&ctx->slock, flags);
1292 ctx_dbg(3, ctx, "buffer queue is empty\n");
1293 return -EIO;
1296 buf = list_entry(dma_q->active.next, struct cal_buffer, list);
1297 ctx->cur_frm = buf;
1298 ctx->next_frm = buf;
1299 list_del(&buf->list);
1300 spin_unlock_irqrestore(&ctx->slock, flags);
1302 addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
1303 ctx->sequence = 0;
1305 ret = cal_get_external_info(ctx);
1306 if (ret < 0)
1307 goto err;
1309 cal_runtime_get(ctx->dev);
1311 enable_irqs(ctx);
1312 camerarx_phy_enable(ctx);
1313 csi2_init(ctx);
1314 csi2_phy_config(ctx);
1315 csi2_lane_config(ctx);
1316 csi2_ctx_config(ctx);
1317 pix_proc_config(ctx);
1318 cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
1319 cal_wr_dma_addr(ctx, addr);
1320 csi2_ppi_enable(ctx);
1322 ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
1323 if (ret) {
1324 ctx_err(ctx, "stream on failed in subdev\n");
1325 cal_runtime_put(ctx->dev);
1326 goto err;
1329 if (debug >= 4)
1330 cal_quickdump_regs(ctx->dev);
1332 return 0;
1334 err:
1335 list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1336 list_del(&buf->list);
1337 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
1339 return ret;
1342 static void cal_stop_streaming(struct vb2_queue *vq)
1344 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1345 struct cal_dmaqueue *dma_q = &ctx->vidq;
1346 struct cal_buffer *buf, *tmp;
1347 unsigned long flags;
1349 if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
1350 ctx_err(ctx, "stream off failed in subdev\n");
1352 csi2_ppi_disable(ctx);
1353 disable_irqs(ctx);
1355 /* Release all active buffers */
1356 spin_lock_irqsave(&ctx->slock, flags);
1357 list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1358 list_del(&buf->list);
1359 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1362 if (ctx->cur_frm == ctx->next_frm) {
1363 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1364 } else {
1365 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1366 vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
1367 VB2_BUF_STATE_ERROR);
1369 ctx->cur_frm = NULL;
1370 ctx->next_frm = NULL;
1371 spin_unlock_irqrestore(&ctx->slock, flags);
1373 cal_runtime_put(ctx->dev);
1376 static const struct vb2_ops cal_video_qops = {
1377 .queue_setup = cal_queue_setup,
1378 .buf_prepare = cal_buffer_prepare,
1379 .buf_queue = cal_buffer_queue,
1380 .start_streaming = cal_start_streaming,
1381 .stop_streaming = cal_stop_streaming,
1382 .wait_prepare = vb2_ops_wait_prepare,
1383 .wait_finish = vb2_ops_wait_finish,
1386 static const struct v4l2_file_operations cal_fops = {
1387 .owner = THIS_MODULE,
1388 .open = v4l2_fh_open,
1389 .release = vb2_fop_release,
1390 .read = vb2_fop_read,
1391 .poll = vb2_fop_poll,
1392 .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
1393 .mmap = vb2_fop_mmap,
1396 static const struct v4l2_ioctl_ops cal_ioctl_ops = {
1397 .vidioc_querycap = cal_querycap,
1398 .vidioc_enum_fmt_vid_cap = cal_enum_fmt_vid_cap,
1399 .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap,
1400 .vidioc_try_fmt_vid_cap = cal_try_fmt_vid_cap,
1401 .vidioc_s_fmt_vid_cap = cal_s_fmt_vid_cap,
1402 .vidioc_enum_framesizes = cal_enum_framesizes,
1403 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1404 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1405 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1406 .vidioc_querybuf = vb2_ioctl_querybuf,
1407 .vidioc_qbuf = vb2_ioctl_qbuf,
1408 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1409 .vidioc_enum_input = cal_enum_input,
1410 .vidioc_g_input = cal_g_input,
1411 .vidioc_s_input = cal_s_input,
1412 .vidioc_enum_frameintervals = cal_enum_frameintervals,
1413 .vidioc_streamon = vb2_ioctl_streamon,
1414 .vidioc_streamoff = vb2_ioctl_streamoff,
1415 .vidioc_log_status = v4l2_ctrl_log_status,
1416 .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
1417 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
1420 static const struct video_device cal_videodev = {
1421 .name = CAL_MODULE_NAME,
1422 .fops = &cal_fops,
1423 .ioctl_ops = &cal_ioctl_ops,
1424 .minor = -1,
1425 .release = video_device_release_empty,
1428 /* -----------------------------------------------------------------
1429 * Initialization and module stuff
1430 * ------------------------------------------------------------------
1432 static int cal_complete_ctx(struct cal_ctx *ctx);
1434 static int cal_async_bound(struct v4l2_async_notifier *notifier,
1435 struct v4l2_subdev *subdev,
1436 struct v4l2_async_subdev *asd)
1438 struct cal_ctx *ctx = notifier_to_ctx(notifier);
1439 struct v4l2_subdev_mbus_code_enum mbus_code;
1440 int ret = 0;
1441 int i, j, k;
1443 if (ctx->sensor) {
1444 ctx_info(ctx, "Rejecting subdev %s (Already set!!)",
1445 subdev->name);
1446 return 0;
1449 ctx->sensor = subdev;
1450 ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name);
1452 /* Enumerate subdevice formats and enable all matching local formats */
1453 ctx->num_active_fmt = 0;
1454 for (j = 0, i = 0; ret != -EINVAL; ++j) {
1455 struct cal_fmt *fmt;
1457 memset(&mbus_code, 0, sizeof(mbus_code));
1458 mbus_code.index = j;
1459 ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
1460 NULL, &mbus_code);
1461 if (ret)
1462 continue;
1464 ctx_dbg(2, ctx,
1465 "subdev %s: code: %04x idx: %d\n",
1466 subdev->name, mbus_code.code, j);
1468 for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
1469 fmt = &cal_formats[k];
1471 if (mbus_code.code == fmt->code) {
1472 ctx->active_fmt[i] = fmt;
1473 ctx_dbg(2, ctx,
1474 "matched fourcc: %s: code: %04x idx: %d\n",
1475 fourcc_to_str(fmt->fourcc),
1476 fmt->code, i);
1477 ctx->num_active_fmt = ++i;
1482 if (i == 0) {
1483 ctx_err(ctx, "No suitable format reported by subdev %s\n",
1484 subdev->name);
1485 return -EINVAL;
1488 cal_complete_ctx(ctx);
1490 return 0;
1493 static int cal_async_complete(struct v4l2_async_notifier *notifier)
1495 struct cal_ctx *ctx = notifier_to_ctx(notifier);
1496 const struct cal_fmt *fmt;
1497 struct v4l2_mbus_framefmt mbus_fmt;
1498 int ret;
1500 ret = __subdev_get_format(ctx, &mbus_fmt);
1501 if (ret)
1502 return ret;
1504 fmt = find_format_by_code(ctx, mbus_fmt.code);
1505 if (!fmt) {
1506 ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
1507 mbus_fmt.code);
1508 return -EINVAL;
1511 /* Save current subdev format */
1512 v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
1513 ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1514 ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
1515 cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
1516 ctx->fmt = fmt;
1517 ctx->m_fmt = mbus_fmt;
1519 return 0;
1522 static const struct v4l2_async_notifier_operations cal_async_ops = {
1523 .bound = cal_async_bound,
1524 .complete = cal_async_complete,
1527 static int cal_complete_ctx(struct cal_ctx *ctx)
1529 struct video_device *vfd;
1530 struct vb2_queue *q;
1531 int ret;
1533 ctx->timeperframe = tpf_default;
1534 ctx->external_rate = 192000000;
1536 /* initialize locks */
1537 spin_lock_init(&ctx->slock);
1538 mutex_init(&ctx->mutex);
1540 /* initialize queue */
1541 q = &ctx->vb_vidq;
1542 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1543 q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
1544 q->drv_priv = ctx;
1545 q->buf_struct_size = sizeof(struct cal_buffer);
1546 q->ops = &cal_video_qops;
1547 q->mem_ops = &vb2_dma_contig_memops;
1548 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1549 q->lock = &ctx->mutex;
1550 q->min_buffers_needed = 3;
1551 q->dev = ctx->v4l2_dev.dev;
1553 ret = vb2_queue_init(q);
1554 if (ret)
1555 return ret;
1557 /* init video dma queues */
1558 INIT_LIST_HEAD(&ctx->vidq.active);
1560 vfd = &ctx->vdev;
1561 *vfd = cal_videodev;
1562 vfd->v4l2_dev = &ctx->v4l2_dev;
1563 vfd->queue = q;
1566 * Provide a mutex to v4l2 core. It will be used to protect
1567 * all fops and v4l2 ioctls.
1569 vfd->lock = &ctx->mutex;
1570 video_set_drvdata(vfd, ctx);
1572 ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
1573 if (ret < 0)
1574 return ret;
1576 v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
1577 video_device_node_name(vfd));
1579 return 0;
1582 static struct device_node *
1583 of_get_next_port(const struct device_node *parent,
1584 struct device_node *prev)
1586 struct device_node *port = NULL;
1588 if (!parent)
1589 return NULL;
1591 if (!prev) {
1592 struct device_node *ports;
1594 * It's the first call; we have to find a port subnode
1595 * within this node or within an optional 'ports' node.
1597 ports = of_get_child_by_name(parent, "ports");
1598 if (ports)
1599 parent = ports;
1601 port = of_get_child_by_name(parent, "port");
1603 /* release the 'ports' node */
1604 of_node_put(ports);
1605 } else {
1606 struct device_node *ports;
1608 ports = of_get_parent(prev);
1609 if (!ports)
1610 return NULL;
1612 do {
1613 port = of_get_next_child(ports, prev);
1614 if (!port) {
1615 of_node_put(ports);
1616 return NULL;
1618 prev = port;
1619 } while (of_node_cmp(port->name, "port") != 0);
1622 return port;
1625 static struct device_node *
1626 of_get_next_endpoint(const struct device_node *parent,
1627 struct device_node *prev)
1629 struct device_node *ep = NULL;
1631 if (!parent)
1632 return NULL;
1634 do {
1635 ep = of_get_next_child(parent, prev);
1636 if (!ep)
1637 return NULL;
1638 prev = ep;
1639 } while (of_node_cmp(ep->name, "endpoint") != 0);
1641 return ep;
1644 static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
1646 struct platform_device *pdev = ctx->dev->pdev;
1647 struct device_node *ep_node, *port, *remote_ep,
1648 *sensor_node, *parent;
1649 struct v4l2_fwnode_endpoint *endpoint;
1650 struct v4l2_async_subdev *asd;
1651 u32 regval = 0;
1652 int ret, index, found_port = 0, lane;
1654 parent = pdev->dev.of_node;
1656 asd = &ctx->asd;
1657 endpoint = &ctx->endpoint;
1659 ep_node = NULL;
1660 port = NULL;
1661 remote_ep = NULL;
1662 sensor_node = NULL;
1663 ret = -EINVAL;
1665 ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst);
1666 for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) {
1667 port = of_get_next_port(parent, port);
1668 if (!port) {
1669 ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n",
1670 index);
1671 goto cleanup_exit;
1674 /* Match the slice number with <REG> */
1675 of_property_read_u32(port, "reg", &regval);
1676 ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n",
1677 index, inst, regval);
1678 if ((regval == inst) && (index == inst)) {
1679 found_port = 1;
1680 break;
1684 if (!found_port) {
1685 ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n",
1686 inst);
1687 goto cleanup_exit;
1690 ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n",
1691 inst);
1693 ep_node = of_get_next_endpoint(port, ep_node);
1694 if (!ep_node) {
1695 ctx_dbg(3, ctx, "can't get next endpoint\n");
1696 goto cleanup_exit;
1699 sensor_node = of_graph_get_remote_port_parent(ep_node);
1700 if (!sensor_node) {
1701 ctx_dbg(3, ctx, "can't get remote parent\n");
1702 goto cleanup_exit;
1704 asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
1705 asd->match.fwnode = of_fwnode_handle(sensor_node);
1707 remote_ep = of_graph_get_remote_endpoint(ep_node);
1708 if (!remote_ep) {
1709 ctx_dbg(3, ctx, "can't get remote-endpoint\n");
1710 goto cleanup_exit;
1712 v4l2_fwnode_endpoint_parse(of_fwnode_handle(remote_ep), endpoint);
1714 if (endpoint->bus_type != V4L2_MBUS_CSI2) {
1715 ctx_err(ctx, "Port:%d sub-device %s is not a CSI2 device\n",
1716 inst, sensor_node->name);
1717 goto cleanup_exit;
1720 /* Store Virtual Channel number */
1721 ctx->virtual_channel = endpoint->base.id;
1723 ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst);
1724 ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel);
1725 ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags);
1726 ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane);
1727 ctx_dbg(3, ctx, "num_data_lanes=%d\n",
1728 endpoint->bus.mipi_csi2.num_data_lanes);
1729 ctx_dbg(3, ctx, "data_lanes= <\n");
1730 for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++)
1731 ctx_dbg(3, ctx, "\t%d\n",
1732 endpoint->bus.mipi_csi2.data_lanes[lane]);
1733 ctx_dbg(3, ctx, "\t>\n");
1735 ctx_dbg(1, ctx, "Port: %d found sub-device %s\n",
1736 inst, sensor_node->name);
1738 ctx->asd_list[0] = asd;
1739 ctx->notifier.subdevs = ctx->asd_list;
1740 ctx->notifier.num_subdevs = 1;
1741 ctx->notifier.ops = &cal_async_ops;
1742 ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
1743 &ctx->notifier);
1744 if (ret) {
1745 ctx_err(ctx, "Error registering async notifier\n");
1746 ret = -EINVAL;
1749 cleanup_exit:
1750 if (remote_ep)
1751 of_node_put(remote_ep);
1752 if (sensor_node)
1753 of_node_put(sensor_node);
1754 if (ep_node)
1755 of_node_put(ep_node);
1756 if (port)
1757 of_node_put(port);
1759 return ret;
1762 static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
1764 struct cal_ctx *ctx;
1765 struct v4l2_ctrl_handler *hdl;
1766 int ret;
1768 ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL);
1769 if (!ctx)
1770 return NULL;
1772 /* save the cal_dev * for future ref */
1773 ctx->dev = dev;
1775 snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name),
1776 "%s-%03d", CAL_MODULE_NAME, inst);
1777 ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev);
1778 if (ret)
1779 goto err_exit;
1781 hdl = &ctx->ctrl_handler;
1782 ret = v4l2_ctrl_handler_init(hdl, 11);
1783 if (ret) {
1784 ctx_err(ctx, "Failed to init ctrl handler\n");
1785 goto unreg_dev;
1787 ctx->v4l2_dev.ctrl_handler = hdl;
1789 /* Make sure Camera Core H/W register area is available */
1790 ctx->cc = dev->cc[inst];
1792 /* Store the instance id */
1793 ctx->csi2_port = inst + 1;
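/* csi2_port is 1-based (1 or 2); inst is the 0-based port index */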
1795 ret = of_cal_create_instance(ctx, inst);
1796 if (ret) {
1797 ret = -EINVAL;
1798 goto free_hdl;
1800 return ctx;
1802 free_hdl:
1803 v4l2_ctrl_handler_free(hdl);
1804 unreg_dev:
1805 v4l2_device_unregister(&ctx->v4l2_dev);
1806 err_exit:
1807 return NULL;
1810 static int cal_probe(struct platform_device *pdev)
1812 struct cal_dev *dev;
1813 int ret;
1814 int irq;
1816 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1817 if (!dev)
1818 return -ENOMEM;
1820 /* set pseudo v4l2 device name so we can use v4l2_printk */
1821 strlcpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
1822 sizeof(dev->v4l2_dev.name));
1824 /* save pdev pointer */
1825 dev->pdev = pdev;
1827 dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1828 "cal_top");
1829 dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
1830 if (IS_ERR(dev->base))
1831 return PTR_ERR(dev->base);
1833 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
1834 dev->res->name, &dev->res->start, &dev->res->end);
1836 irq = platform_get_irq(pdev, 0);
1837 cal_dbg(1, dev, "got irq# %d\n", irq);
1838 ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
1839 dev);
1840 if (ret)
1841 return ret;
1843 platform_set_drvdata(pdev, dev);
1845 dev->cm = cm_create(dev);
1846 if (IS_ERR(dev->cm))
1847 return PTR_ERR(dev->cm);
1849 dev->cc[0] = cc_create(dev, 0);
1850 if (IS_ERR(dev->cc[0]))
1851 return PTR_ERR(dev->cc[0]);
1853 dev->cc[1] = cc_create(dev, 1);
1854 if (IS_ERR(dev->cc[1]))
1855 return PTR_ERR(dev->cc[1]);
1857 dev->ctx[0] = NULL;
1858 dev->ctx[1] = NULL;
1860 dev->ctx[0] = cal_create_instance(dev, 0);
1861 dev->ctx[1] = cal_create_instance(dev, 1);
1862 if (!dev->ctx[0] && !dev->ctx[1]) {
1863 cal_err(dev, "Neither port is configured, no point in staying up\n");
1864 return -ENODEV;
1867 pm_runtime_enable(&pdev->dev);
1869 ret = cal_runtime_get(dev);
1870 if (ret)
1871 goto runtime_disable;
1873 /* Just check we can actually access the module */
1874 cal_get_hwinfo(dev);
1876 cal_runtime_put(dev);
1878 return 0;
1880 runtime_disable:
1881 pm_runtime_disable(&pdev->dev);
1882 return ret;
1885 static int cal_remove(struct platform_device *pdev)
1887 struct cal_dev *dev =
1888 (struct cal_dev *)platform_get_drvdata(pdev);
1889 struct cal_ctx *ctx;
1890 int i;
1892 cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
1894 cal_runtime_get(dev);
1896 for (i = 0; i < CAL_NUM_CONTEXT; i++) {
1897 ctx = dev->ctx[i];
1898 if (ctx) {
1899 ctx_dbg(1, ctx, "unregistering %s\n",
1900 video_device_node_name(&ctx->vdev));
1901 camerarx_phy_disable(ctx);
1902 v4l2_async_notifier_unregister(&ctx->notifier);
1903 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
1904 v4l2_device_unregister(&ctx->v4l2_dev);
1905 video_unregister_device(&ctx->vdev);
1909 cal_runtime_put(dev);
1910 pm_runtime_disable(&pdev->dev);
1912 return 0;
1915 #if defined(CONFIG_OF)
1916 static const struct of_device_id cal_of_match[] = {
1917 { .compatible = "ti,dra72-cal", },
1920 MODULE_DEVICE_TABLE(of, cal_of_match);
1921 #endif
1923 static struct platform_driver cal_pdrv = {
1924 .probe = cal_probe,
1925 .remove = cal_remove,
1926 .driver = {
1927 .name = CAL_MODULE_NAME,
1928 .of_match_table = of_match_ptr(cal_of_match),
1932 module_platform_driver(cal_pdrv);