drivers/media/platform/sh_veu.c
1 /*
2 * sh-mobile VEU mem2mem driver
3 *
4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
6 * Copyright (C) 2008 Magnus Damm
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the version 2 of the GNU General Public License as
10 * published by the Free Software Foundation
11 */
13 #include <linux/err.h>
14 #include <linux/fs.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/slab.h>
22 #include <linux/types.h>
23 #include <linux/videodev2.h>
25 #include <media/v4l2-dev.h>
26 #include <media/v4l2-device.h>
27 #include <media/v4l2-ioctl.h>
28 #include <media/v4l2-mem2mem.h>
29 #include <media/v4l2-image-sizes.h>
30 #include <media/videobuf2-dma-contig.h>
32 #define VEU_STR 0x00 /* start register */
33 #define VEU_SWR 0x10 /* src: line length */
34 #define VEU_SSR 0x14 /* src: image size */
35 #define VEU_SAYR 0x18 /* src: y/rgb plane address */
36 #define VEU_SACR 0x1c /* src: c plane address */
37 #define VEU_BSSR 0x20 /* bundle mode register */
38 #define VEU_EDWR 0x30 /* dst: line length */
39 #define VEU_DAYR 0x34 /* dst: y/rgb plane address */
40 #define VEU_DACR 0x38 /* dst: c plane address */
41 #define VEU_TRCR 0x50 /* transform control */
42 #define VEU_RFCR 0x54 /* resize scale */
43 #define VEU_RFSR 0x58 /* resize clip */
44 #define VEU_ENHR 0x5c /* enhance */
45 #define VEU_FMCR 0x70 /* filter mode */
46 #define VEU_VTCR 0x74 /* lowpass vertical */
47 #define VEU_HTCR 0x78 /* lowpass horizontal */
48 #define VEU_APCR 0x80 /* color match */
49 #define VEU_ECCR 0x84 /* color replace */
50 #define VEU_AFXR 0x90 /* fixed mode */
51 #define VEU_SWPR 0x94 /* swap */
52 #define VEU_EIER 0xa0 /* interrupt mask */
53 #define VEU_EVTR 0xa4 /* interrupt event */
54 #define VEU_STAR 0xb0 /* status */
55 #define VEU_BSRR 0xb4 /* reset */
57 #define VEU_MCR00 0x200 /* color conversion matrix coefficient 00 */
58 #define VEU_MCR01 0x204 /* color conversion matrix coefficient 01 */
59 #define VEU_MCR02 0x208 /* color conversion matrix coefficient 02 */
60 #define VEU_MCR10 0x20c /* color conversion matrix coefficient 10 */
61 #define VEU_MCR11 0x210 /* color conversion matrix coefficient 11 */
62 #define VEU_MCR12 0x214 /* color conversion matrix coefficient 12 */
63 #define VEU_MCR20 0x218 /* color conversion matrix coefficient 20 */
64 #define VEU_MCR21 0x21c /* color conversion matrix coefficient 21 */
65 #define VEU_MCR22 0x220 /* color conversion matrix coefficient 22 */
66 #define VEU_COFFR 0x224 /* color conversion offset */
67 #define VEU_CBR 0x228 /* color conversion clip */
69 /*
70 * 4092x4092 max size is the normal case. In some cases it can be reduced to
71 * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188.
72 */
73 #define MAX_W 4092
74 #define MAX_H 4092
75 #define MIN_W 8
76 #define MIN_H 8
77 #define ALIGN_W 4
79 /* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */
80 #define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024)
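/*
 * Illustrative arithmetic (editor's note): 2048 * 1536 pixels * 2 bytes/pixel
 * * 3 buffers = 18,874,368 bytes, which is already an exact multiple of
 * 1 MiB, so the ALIGN() above leaves the limit at 18 MiB.
 */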
82 #define MEM2MEM_DEF_TRANSLEN 1
84 struct sh_veu_dev;
86 struct sh_veu_file {
87 struct sh_veu_dev *veu_dev;
88 bool cfg_needed;
91 struct sh_veu_format {
92 char *name;
93 u32 fourcc;
94 unsigned int depth;
95 unsigned int ydepth;
98 /* video data format */
99 struct sh_veu_vfmt {
100 /* Replace with v4l2_rect */
101 struct v4l2_rect frame;
102 unsigned int bytesperline;
103 unsigned int offset_y;
104 unsigned int offset_c;
105 const struct sh_veu_format *fmt;
108 struct sh_veu_dev {
109 struct v4l2_device v4l2_dev;
110 struct video_device vdev;
111 struct v4l2_m2m_dev *m2m_dev;
112 struct device *dev;
113 struct v4l2_m2m_ctx *m2m_ctx;
114 struct sh_veu_vfmt vfmt_out;
115 struct sh_veu_vfmt vfmt_in;
116 /* Only single user per direction so far */
117 struct sh_veu_file *capture;
118 struct sh_veu_file *output;
119 struct mutex fop_lock;
120 void __iomem *base;
121 spinlock_t lock;
122 bool is_2h;
123 unsigned int xaction;
124 bool aborting;
127 enum sh_veu_fmt_idx {
128 SH_VEU_FMT_NV12,
129 SH_VEU_FMT_NV16,
130 SH_VEU_FMT_NV24,
131 SH_VEU_FMT_RGB332,
132 SH_VEU_FMT_RGB444,
133 SH_VEU_FMT_RGB565,
134 SH_VEU_FMT_RGB666,
135 SH_VEU_FMT_RGB24,
138 #define DEFAULT_IN_WIDTH VGA_WIDTH
139 #define DEFAULT_IN_HEIGHT VGA_HEIGHT
140 #define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12
141 #define DEFAULT_OUT_WIDTH VGA_WIDTH
142 #define DEFAULT_OUT_HEIGHT VGA_HEIGHT
143 #define DEFAULT_OUT_FMTIDX SH_VEU_FMT_RGB565
145 /*
146 * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte
147 * aligned for NV24.
148 */
149 static const struct sh_veu_format sh_veu_fmt[] = {
150 [SH_VEU_FMT_NV12] = { .ydepth = 8, .depth = 12, .name = "NV12", .fourcc = V4L2_PIX_FMT_NV12 },
151 [SH_VEU_FMT_NV16] = { .ydepth = 8, .depth = 16, .name = "NV16", .fourcc = V4L2_PIX_FMT_NV16 },
152 [SH_VEU_FMT_NV24] = { .ydepth = 8, .depth = 24, .name = "NV24", .fourcc = V4L2_PIX_FMT_NV24 },
153 [SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .name = "RGB332", .fourcc = V4L2_PIX_FMT_RGB332 },
154 [SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .name = "RGB444", .fourcc = V4L2_PIX_FMT_RGB444 },
155 [SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565 },
156 [SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666 },
157 [SH_VEU_FMT_RGB24] = { .ydepth = 24, .depth = 24, .name = "RGB24", .fourcc = V4L2_PIX_FMT_RGB24 },
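/*
 * Illustrative note on depth vs. ydepth (editor's note): ydepth counts bits
 * per pixel in the Y/RGB plane only, while depth covers all planes. For NV12
 * (ydepth = 8, depth = 12) a 640x480 frame thus has
 * bytesperline = 640 * 8 / 8 = 640 and
 * sizeimage = 640 * 480 * 12 / 8 = 460800 bytes, i.e. the Y plane plus a
 * half-size CbCr plane.
 */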
160 #define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \
161 .frame = { \
162 .width = VGA_WIDTH, \
163 .height = VGA_HEIGHT, \
164 }, \
165 .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3, \
166 .fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX], \
169 #define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){ \
170 .frame = { \
171 .width = VGA_WIDTH, \
172 .height = VGA_HEIGHT, \
173 }, \
174 .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3, \
175 .fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX], \
178 /*
179 * TODO: add support for further output formats:
180 * SH_VEU_FMT_NV12,
181 * SH_VEU_FMT_NV16,
182 * SH_VEU_FMT_NV24,
183 * SH_VEU_FMT_RGB332,
184 * SH_VEU_FMT_RGB444,
185 * SH_VEU_FMT_RGB666,
186 * SH_VEU_FMT_RGB24,
187 */
189 static const int sh_veu_fmt_out[] = {
190 SH_VEU_FMT_RGB565,
193 /*
194 * TODO: add support for further input formats:
195 * SH_VEU_FMT_NV16,
196 * SH_VEU_FMT_NV24,
197 * SH_VEU_FMT_RGB565,
198 * SH_VEU_FMT_RGB666,
199 * SH_VEU_FMT_RGB24,
200 */
201 static const int sh_veu_fmt_in[] = {
202 SH_VEU_FMT_NV12,
205 static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
207 switch (fourcc) {
208 default:
209 BUG();
210 case V4L2_PIX_FMT_NV12:
211 case V4L2_PIX_FMT_NV16:
212 case V4L2_PIX_FMT_NV24:
213 return V4L2_COLORSPACE_SMPTE170M;
214 case V4L2_PIX_FMT_RGB332:
215 case V4L2_PIX_FMT_RGB444:
216 case V4L2_PIX_FMT_RGB565:
217 case V4L2_PIX_FMT_BGR666:
218 case V4L2_PIX_FMT_RGB24:
219 return V4L2_COLORSPACE_SRGB;
223 static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg)
225 return ioread32(veu->base + reg);
228 static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg,
229 u32 value)
231 iowrite32(value, veu->base + reg);
234 /* ========== mem2mem callbacks ========== */
236 static void sh_veu_job_abort(void *priv)
238 struct sh_veu_dev *veu = priv;
240 /* Will cancel the transaction in the next interrupt handler */
241 veu->aborting = true;
244 static void sh_veu_process(struct sh_veu_dev *veu,
245 struct vb2_buffer *src_buf,
246 struct vb2_buffer *dst_buf)
248 dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
250 sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y);
251 sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ?
252 addr + veu->vfmt_out.offset_c : 0);
253 dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__,
254 (unsigned long)addr,
255 veu->vfmt_out.offset_y, veu->vfmt_out.offset_c);
257 addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
258 sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y);
259 sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ?
260 addr + veu->vfmt_in.offset_c : 0);
261 dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__,
262 (unsigned long)addr,
263 veu->vfmt_in.offset_y, veu->vfmt_in.offset_c);
265 sh_veu_reg_write(veu, VEU_STR, 1);
267 sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
270 /**
271 * sh_veu_device_run() - prepares and starts the device
272 *
273 * This will be called by the framework when it decides to schedule a particular
274 * instance.
275 */
276 static void sh_veu_device_run(void *priv)
278 struct sh_veu_dev *veu = priv;
279 struct vb2_buffer *src_buf, *dst_buf;
281 src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
282 dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
284 if (src_buf && dst_buf)
285 sh_veu_process(veu, src_buf, dst_buf);
288 /* ========== video ioctls ========== */
290 static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
291 enum v4l2_buf_type type)
293 return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
294 veu_file == veu->capture) ||
295 (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
296 veu_file == veu->output);
299 static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
300 struct vb2_queue *dst_vq);
302 /*
303 * It is not unusual to have video nodes open()ed multiple times. While some
304 * V4L2 operations are non-intrusive, like querying formats and various
305 * parameters, others, like setting formats, starting and stopping streaming,
306 * queuing and dequeuing buffers, directly affect hardware configuration and/or
307 * execution. This function verifies availability of the requested interface
308 * and, if available, reserves it for the requesting user.
309 */
310 static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
311 enum v4l2_buf_type type)
313 struct sh_veu_file **stream;
315 switch (type) {
316 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
317 stream = &veu->capture;
318 break;
319 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
320 stream = &veu->output;
321 break;
322 default:
323 return -EINVAL;
326 if (*stream == veu_file)
327 return 0;
329 if (*stream)
330 return -EBUSY;
332 *stream = veu_file;
334 return 0;
337 static int sh_veu_context_init(struct sh_veu_dev *veu)
339 if (veu->m2m_ctx)
340 return 0;
342 veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
343 sh_veu_queue_init);
345 return PTR_ERR_OR_ZERO(veu->m2m_ctx);
348 static int sh_veu_querycap(struct file *file, void *priv,
349 struct v4l2_capability *cap)
351 strlcpy(cap->driver, "sh-veu", sizeof(cap->driver));
352 strlcpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
353 strlcpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
354 cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
355 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
357 return 0;
360 static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
362 if (f->index >= fmt_num)
363 return -EINVAL;
365 strlcpy(f->description, sh_veu_fmt[fmt[f->index]].name, sizeof(f->description));
366 f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
367 return 0;
370 static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv,
371 struct v4l2_fmtdesc *f)
373 return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out));
376 static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv,
377 struct v4l2_fmtdesc *f)
379 return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in));
382 static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu,
383 enum v4l2_buf_type type)
385 switch (type) {
386 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
387 return &veu->vfmt_out;
388 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
389 return &veu->vfmt_in;
390 default:
391 return NULL;
395 static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
397 struct v4l2_pix_format *pix = &f->fmt.pix;
398 struct sh_veu_dev *veu = veu_file->veu_dev;
399 struct sh_veu_vfmt *vfmt;
401 vfmt = sh_veu_get_vfmt(veu, f->type);
403 pix->width = vfmt->frame.width;
404 pix->height = vfmt->frame.height;
405 pix->field = V4L2_FIELD_NONE;
406 pix->pixelformat = vfmt->fmt->fourcc;
407 pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
408 pix->bytesperline = vfmt->bytesperline;
409 pix->sizeimage = vfmt->bytesperline * pix->height *
410 vfmt->fmt->depth / vfmt->fmt->ydepth;
411 dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__,
412 f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat);
414 return 0;
417 static int sh_veu_g_fmt_vid_out(struct file *file, void *priv,
418 struct v4l2_format *f)
420 return sh_veu_g_fmt(priv, f);
423 static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv,
424 struct v4l2_format *f)
426 return sh_veu_g_fmt(priv, f);
429 static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt)
431 struct v4l2_pix_format *pix = &f->fmt.pix;
432 unsigned int y_bytes_used;
434 /*
435 * The V4L2 specification suggests that the driver should correct the
436 * format struct if any of the dimensions is unsupported.
437 */
438 switch (pix->field) {
439 default:
440 case V4L2_FIELD_ANY:
441 pix->field = V4L2_FIELD_NONE;
442 /* fall through: continue handling V4L2_FIELD_NONE */
443 case V4L2_FIELD_NONE:
444 break;
447 v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W,
448 &pix->height, MIN_H, MAX_H, 0, 0);
450 y_bytes_used = (pix->width * fmt->ydepth) >> 3;
452 if (pix->bytesperline < y_bytes_used)
453 pix->bytesperline = y_bytes_used;
454 pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth;
456 pix->pixelformat = fmt->fourcc;
457 pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
459 pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage);
461 return 0;
464 static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f)
466 const int *fmt;
467 int i, n, dflt;
469 pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field);
471 switch (f->type) {
472 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
473 fmt = sh_veu_fmt_out;
474 n = ARRAY_SIZE(sh_veu_fmt_out);
475 dflt = DEFAULT_OUT_FMTIDX;
476 break;
477 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
478 default:
479 fmt = sh_veu_fmt_in;
480 n = ARRAY_SIZE(sh_veu_fmt_in);
481 dflt = DEFAULT_IN_FMTIDX;
482 break;
485 for (i = 0; i < n; i++)
486 if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat)
487 return &sh_veu_fmt[fmt[i]];
489 return &sh_veu_fmt[dflt];
492 static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
493 struct v4l2_format *f)
495 const struct sh_veu_format *fmt;
497 fmt = sh_veu_find_fmt(f);
498 if (!fmt)
499 /* wrong buffer type */
500 return -EINVAL;
502 return sh_veu_try_fmt(f, fmt);
505 static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
506 struct v4l2_format *f)
508 const struct sh_veu_format *fmt;
510 fmt = sh_veu_find_fmt(f);
511 if (!fmt)
512 /* wrong buffer type */
513 return -EINVAL;
515 return sh_veu_try_fmt(f, fmt);
518 static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt)
520 /* dst_left and dst_top validity will be verified in CROP / COMPOSE */
521 unsigned int left = vfmt->frame.left & ~0x03;
522 unsigned int top = vfmt->frame.top;
523 dma_addr_t offset = ((left * veu->vfmt_out.fmt->depth) >> 3) +
524 top * veu->vfmt_out.bytesperline;
525 unsigned int y_line;
527 vfmt->offset_y = offset;
529 switch (vfmt->fmt->fourcc) {
530 case V4L2_PIX_FMT_NV12:
531 case V4L2_PIX_FMT_NV16:
532 case V4L2_PIX_FMT_NV24:
533 y_line = ALIGN(vfmt->frame.width, 16);
534 vfmt->offset_c = offset + y_line * vfmt->frame.height;
535 break;
536 case V4L2_PIX_FMT_RGB332:
537 case V4L2_PIX_FMT_RGB444:
538 case V4L2_PIX_FMT_RGB565:
539 case V4L2_PIX_FMT_BGR666:
540 case V4L2_PIX_FMT_RGB24:
541 vfmt->offset_c = 0;
542 break;
543 default:
544 BUG();
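/*
 * Illustrative example (editor's note): for a full 640x480 NV12 frame with
 * frame.left = frame.top = 0 the code above yields offset_y = 0 and
 * offset_c = ALIGN(640, 16) * 480 = 307200, i.e. the CbCr plane starts
 * right after the 300 KiB Y plane.
 */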
548 static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
550 struct v4l2_pix_format *pix = &f->fmt.pix;
551 struct sh_veu_dev *veu = veu_file->veu_dev;
552 struct sh_veu_vfmt *vfmt;
553 struct vb2_queue *vq;
554 int ret = sh_veu_context_init(veu);
555 if (ret < 0)
556 return ret;
558 vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type);
559 if (!vq)
560 return -EINVAL;
562 if (vb2_is_busy(vq)) {
563 v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", __func__);
564 return -EBUSY;
567 vfmt = sh_veu_get_vfmt(veu, f->type);
568 /* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */
570 vfmt->fmt = sh_veu_find_fmt(f);
571 /* vfmt->fmt != NULL following the same argument as above */
572 vfmt->frame.width = pix->width;
573 vfmt->frame.height = pix->height;
574 vfmt->bytesperline = pix->bytesperline;
576 sh_veu_colour_offset(veu, vfmt);
578 /*
579 * We could also verify and require reconfiguration only if any parameters
580 * actually have changed, but it is unlikely that the user requests the
581 * same configuration several times without closing the device.
582 */
583 veu_file->cfg_needed = true;
585 dev_dbg(veu->dev,
586 "Setting format for type %d, wxh: %dx%d, fmt: %x\n",
587 f->type, pix->width, pix->height, vfmt->fmt->fourcc);
589 return 0;
592 static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv,
593 struct v4l2_format *f)
595 int ret = sh_veu_try_fmt_vid_cap(file, priv, f);
596 if (ret)
597 return ret;
599 return sh_veu_s_fmt(priv, f);
602 static int sh_veu_s_fmt_vid_out(struct file *file, void *priv,
603 struct v4l2_format *f)
605 int ret = sh_veu_try_fmt_vid_out(file, priv, f);
606 if (ret)
607 return ret;
609 return sh_veu_s_fmt(priv, f);
612 static int sh_veu_reqbufs(struct file *file, void *priv,
613 struct v4l2_requestbuffers *reqbufs)
615 struct sh_veu_file *veu_file = priv;
616 struct sh_veu_dev *veu = veu_file->veu_dev;
617 int ret = sh_veu_context_init(veu);
618 if (ret < 0)
619 return ret;
621 ret = sh_veu_stream_init(veu, veu_file, reqbufs->type);
622 if (ret < 0)
623 return ret;
625 return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs);
628 static int sh_veu_querybuf(struct file *file, void *priv,
629 struct v4l2_buffer *buf)
631 struct sh_veu_file *veu_file = priv;
633 if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
634 return -EBUSY;
636 return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf);
639 static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
641 struct sh_veu_file *veu_file = priv;
643 dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
644 if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
645 return -EBUSY;
647 return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf);
650 static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
652 struct sh_veu_file *veu_file = priv;
654 dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
655 if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
656 return -EBUSY;
658 return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf);
661 static void sh_veu_calc_scale(struct sh_veu_dev *veu,
662 int size_in, int size_out, int crop_out,
663 u32 *mant, u32 *frac, u32 *rep)
665 u32 fixpoint;
667 /* calculate FRAC and MANT */
668 *rep = *mant = *frac = 0;
670 if (size_in == size_out) {
671 if (crop_out != size_out)
672 *mant = 1; /* needed for cropping */
673 return;
676 /* VEU2H special upscale */
677 if (veu->is_2h && size_out > size_in) {
678 u32 fixpoint = (4096 * size_in) / size_out;
679 *mant = fixpoint / 4096;
680 *frac = (fixpoint - (*mant * 4096)) & ~0x07;
682 switch (*frac) {
683 case 0x800:
684 *rep = 1;
685 break;
686 case 0x400:
687 *rep = 3;
688 break;
689 case 0x200:
690 *rep = 7;
691 break;
693 if (*rep)
694 return;
697 fixpoint = (4096 * (size_in - 1)) / (size_out + 1);
698 *mant = fixpoint / 4096;
699 *frac = fixpoint - (*mant * 4096);
701 if (*frac & 0x07) {
702 /*
703 * FIXME: do we really have to round down twice in the
704 * up-scaling case?
705 */
706 *frac &= ~0x07;
707 if (size_out > size_in)
708 *frac -= 8; /* round down if scaling up */
709 else
710 *frac += 8; /* round up if scaling down */
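/*
 * Worked example (editor's note, illustrative): downscaling 640 to 320 with
 * no cropping gives fixpoint = 4096 * 639 / 321 = 8153, so *mant = 1 and
 * *frac = 4057; the low three bits are then cleared and, since this is a
 * downscale, 8 is added, leaving *frac = 4064 (a ratio of about 1.99 in
 * 4.12 fixed point).
 */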
714 static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu,
715 int size_in, int size_out, int crop_out)
717 u32 mant, frac, value, rep;
719 sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
721 /* set scale */
722 value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) |
723 (((mant << 12) | frac) << 16);
725 sh_veu_reg_write(veu, VEU_RFCR, value);
727 /* set clip */
728 value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) |
729 (((rep << 12) | crop_out) << 16);
731 sh_veu_reg_write(veu, VEU_RFSR, value);
733 return ALIGN((size_in * crop_out) / size_out, 4);
736 static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu,
737 int size_in, int size_out, int crop_out)
739 u32 mant, frac, value, rep;
741 sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
743 /* set scale */
744 value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) |
745 (mant << 12) | frac;
747 sh_veu_reg_write(veu, VEU_RFCR, value);
749 /* set clip */
750 value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) |
751 (rep << 12) | crop_out;
753 sh_veu_reg_write(veu, VEU_RFSR, value);
755 return ALIGN((size_in * crop_out) / size_out, 4);
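/*
 * Register packing note (editor's note, illustrative): VEU_RFCR and VEU_RFSR
 * each hold the vertical values in bits 31:16 and the horizontal values in
 * bits 15:0; every 16-bit half is encoded as (mant << 12) | frac for the
 * scale factor and (rep << 12) | crop_out for the clip size, as the two
 * helpers above show.
 */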
758 static void sh_veu_configure(struct sh_veu_dev *veu)
760 u32 src_width, src_stride, src_height;
761 u32 dst_width, dst_stride, dst_height;
762 u32 real_w, real_h;
764 /* reset VEU */
765 sh_veu_reg_write(veu, VEU_BSRR, 0x100);
767 src_width = veu->vfmt_in.frame.width;
768 src_height = veu->vfmt_in.frame.height;
769 src_stride = ALIGN(veu->vfmt_in.frame.width, 16);
771 dst_width = real_w = veu->vfmt_out.frame.width;
772 dst_height = real_h = veu->vfmt_out.frame.height;
773 /* The datasheet is unclear on whether this is always a number of bytes */
774 dst_stride = veu->vfmt_out.bytesperline;
776 /*
777 * So far real_w == dst_width && real_h == dst_height, but it wasn't
778 * necessarily the case in the original vidix driver, so it may change
779 * here in the future too.
780 */
781 src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width);
782 src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height);
784 sh_veu_reg_write(veu, VEU_SWR, src_stride);
785 sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16));
786 sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */
788 sh_veu_reg_write(veu, VEU_EDWR, dst_stride);
789 sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */
791 sh_veu_reg_write(veu, VEU_SWPR, 0x67);
792 sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4);
794 if (veu->is_2h) {
795 sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5);
796 sh_veu_reg_write(veu, VEU_MCR01, 0x0950);
797 sh_veu_reg_write(veu, VEU_MCR02, 0x0000);
799 sh_veu_reg_write(veu, VEU_MCR10, 0x397f);
800 sh_veu_reg_write(veu, VEU_MCR11, 0x0950);
801 sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd);
803 sh_veu_reg_write(veu, VEU_MCR20, 0x0000);
804 sh_veu_reg_write(veu, VEU_MCR21, 0x0950);
805 sh_veu_reg_write(veu, VEU_MCR22, 0x1023);
807 sh_veu_reg_write(veu, VEU_COFFR, 0x00800010);
811 static int sh_veu_streamon(struct file *file, void *priv,
812 enum v4l2_buf_type type)
814 struct sh_veu_file *veu_file = priv;
816 if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
817 return -EBUSY;
819 if (veu_file->cfg_needed) {
820 struct sh_veu_dev *veu = veu_file->veu_dev;
821 veu_file->cfg_needed = false;
822 sh_veu_configure(veu_file->veu_dev);
823 veu->xaction = 0;
824 veu->aborting = false;
827 return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type);
830 static int sh_veu_streamoff(struct file *file, void *priv,
831 enum v4l2_buf_type type)
833 struct sh_veu_file *veu_file = priv;
835 if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
836 return -EBUSY;
838 return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type);
841 static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
842 .vidioc_querycap = sh_veu_querycap,
844 .vidioc_enum_fmt_vid_cap = sh_veu_enum_fmt_vid_cap,
845 .vidioc_g_fmt_vid_cap = sh_veu_g_fmt_vid_cap,
846 .vidioc_try_fmt_vid_cap = sh_veu_try_fmt_vid_cap,
847 .vidioc_s_fmt_vid_cap = sh_veu_s_fmt_vid_cap,
849 .vidioc_enum_fmt_vid_out = sh_veu_enum_fmt_vid_out,
850 .vidioc_g_fmt_vid_out = sh_veu_g_fmt_vid_out,
851 .vidioc_try_fmt_vid_out = sh_veu_try_fmt_vid_out,
852 .vidioc_s_fmt_vid_out = sh_veu_s_fmt_vid_out,
854 .vidioc_reqbufs = sh_veu_reqbufs,
855 .vidioc_querybuf = sh_veu_querybuf,
857 .vidioc_qbuf = sh_veu_qbuf,
858 .vidioc_dqbuf = sh_veu_dqbuf,
860 .vidioc_streamon = sh_veu_streamon,
861 .vidioc_streamoff = sh_veu_streamoff,
864 /* ========== Queue operations ========== */
866 static int sh_veu_queue_setup(struct vb2_queue *vq,
867 unsigned int *nbuffers, unsigned int *nplanes,
868 unsigned int sizes[], struct device *alloc_devs[])
870 struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
871 struct sh_veu_vfmt *vfmt = sh_veu_get_vfmt(veu, vq->type);
872 unsigned int count = *nbuffers;
873 unsigned int size = vfmt->bytesperline * vfmt->frame.height *
874 vfmt->fmt->depth / vfmt->fmt->ydepth;
876 if (count < 2)
877 *nbuffers = count = 2;
879 if (size * count > VIDEO_MEM_LIMIT) {
880 count = VIDEO_MEM_LIMIT / size;
881 *nbuffers = count;
884 if (*nplanes)
885 return sizes[0] < size ? -EINVAL : 0;
887 *nplanes = 1;
888 sizes[0] = size;
890 dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);
892 return 0;
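/*
 * Illustrative sizing (editor's note): with the default 640x480 RGB565
 * capture format (bytesperline = 1280, depth = ydepth = 16) each buffer is
 * 1280 * 480 = 614400 bytes, so VIDEO_MEM_LIMIT caps the queue at about
 * 30 buffers.
 */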
895 static int sh_veu_buf_prepare(struct vb2_buffer *vb)
897 struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
898 struct sh_veu_vfmt *vfmt;
899 unsigned int sizeimage;
901 vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type);
902 sizeimage = vfmt->bytesperline * vfmt->frame.height *
903 vfmt->fmt->depth / vfmt->fmt->ydepth;
905 if (vb2_plane_size(vb, 0) < sizeimage) {
906 dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n",
907 __func__, vb2_plane_size(vb, 0), sizeimage);
908 return -EINVAL;
911 vb2_set_plane_payload(vb, 0, sizeimage);
913 return 0;
916 static void sh_veu_buf_queue(struct vb2_buffer *vb)
918 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
919 struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
920 dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->type);
921 v4l2_m2m_buf_queue(veu->m2m_ctx, vbuf);
924 static const struct vb2_ops sh_veu_qops = {
925 .queue_setup = sh_veu_queue_setup,
926 .buf_prepare = sh_veu_buf_prepare,
927 .buf_queue = sh_veu_buf_queue,
928 .wait_prepare = vb2_ops_wait_prepare,
929 .wait_finish = vb2_ops_wait_finish,
932 static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
933 struct vb2_queue *dst_vq)
935 struct sh_veu_dev *veu = priv;
936 int ret;
938 memset(src_vq, 0, sizeof(*src_vq));
939 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
940 src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
941 src_vq->drv_priv = veu;
942 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
943 src_vq->ops = &sh_veu_qops;
944 src_vq->mem_ops = &vb2_dma_contig_memops;
945 src_vq->lock = &veu->fop_lock;
946 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
947 src_vq->dev = veu->v4l2_dev.dev;
949 ret = vb2_queue_init(src_vq);
950 if (ret < 0)
951 return ret;
953 memset(dst_vq, 0, sizeof(*dst_vq));
954 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
955 dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
956 dst_vq->drv_priv = veu;
957 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
958 dst_vq->ops = &sh_veu_qops;
959 dst_vq->mem_ops = &vb2_dma_contig_memops;
960 dst_vq->lock = &veu->fop_lock;
961 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
962 dst_vq->dev = veu->v4l2_dev.dev;
964 return vb2_queue_init(dst_vq);
967 /* ========== File operations ========== */
969 static int sh_veu_open(struct file *file)
971 struct sh_veu_dev *veu = video_drvdata(file);
972 struct sh_veu_file *veu_file;
974 veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL);
975 if (!veu_file)
976 return -ENOMEM;
978 veu_file->veu_dev = veu;
979 veu_file->cfg_needed = true;
981 file->private_data = veu_file;
983 pm_runtime_get_sync(veu->dev);
985 dev_dbg(veu->dev, "Created instance %p\n", veu_file);
987 return 0;
990 static int sh_veu_release(struct file *file)
992 struct sh_veu_dev *veu = video_drvdata(file);
993 struct sh_veu_file *veu_file = file->private_data;
995 dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
997 if (veu_file == veu->capture) {
998 veu->capture = NULL;
999 vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
1002 if (veu_file == veu->output) {
1003 veu->output = NULL;
1004 vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT));
1007 if (!veu->output && !veu->capture && veu->m2m_ctx) {
1008 v4l2_m2m_ctx_release(veu->m2m_ctx);
1009 veu->m2m_ctx = NULL;
1012 pm_runtime_put(veu->dev);
1014 kfree(veu_file);
1016 return 0;
1019 static unsigned int sh_veu_poll(struct file *file,
1020 struct poll_table_struct *wait)
1022 struct sh_veu_file *veu_file = file->private_data;
1024 return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait);
1027 static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
1029 struct sh_veu_file *veu_file = file->private_data;
1031 return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
1034 static const struct v4l2_file_operations sh_veu_fops = {
1035 .owner = THIS_MODULE,
1036 .open = sh_veu_open,
1037 .release = sh_veu_release,
1038 .poll = sh_veu_poll,
1039 .unlocked_ioctl = video_ioctl2,
1040 .mmap = sh_veu_mmap,
1043 static const struct video_device sh_veu_videodev = {
1044 .name = "sh-veu",
1045 .fops = &sh_veu_fops,
1046 .ioctl_ops = &sh_veu_ioctl_ops,
1047 .minor = -1,
1048 .release = video_device_release_empty,
1049 .vfl_dir = VFL_DIR_M2M,
1052 static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
1053 .device_run = sh_veu_device_run,
1054 .job_abort = sh_veu_job_abort,
1057 static irqreturn_t sh_veu_bh(int irq, void *dev_id)
1059 struct sh_veu_dev *veu = dev_id;
1061 if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) {
1062 v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx);
1063 veu->xaction = 0;
1064 } else {
1065 sh_veu_device_run(veu);
1068 return IRQ_HANDLED;
1071 static irqreturn_t sh_veu_isr(int irq, void *dev_id)
1073 struct sh_veu_dev *veu = dev_id;
1074 struct vb2_v4l2_buffer *dst;
1075 struct vb2_v4l2_buffer *src;
1076 u32 status = sh_veu_reg_read(veu, VEU_EVTR);
1078 /* bundle read mode not used */
1079 if (!(status & 1))
1080 return IRQ_NONE;
1082 /* disable interrupt in VEU */
1083 sh_veu_reg_write(veu, VEU_EIER, 0);
1084 /* halt operation */
1085 sh_veu_reg_write(veu, VEU_STR, 0);
1086 /* ack int, write 0 to clear bits */
1087 sh_veu_reg_write(veu, VEU_EVTR, status & ~1);
1089 /* conversion completed */
1090 dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx);
1091 src = v4l2_m2m_src_buf_remove(veu->m2m_ctx);
1092 if (!src || !dst)
1093 return IRQ_NONE;
1095 dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
1096 dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1097 dst->flags |=
1098 src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1099 dst->timecode = src->timecode;
1101 spin_lock(&veu->lock);
1102 v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
1103 v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
1104 spin_unlock(&veu->lock);
1106 veu->xaction++;
1108 return IRQ_WAKE_THREAD;
1111 static int sh_veu_probe(struct platform_device *pdev)
1113 struct sh_veu_dev *veu;
1114 struct resource *reg_res;
1115 struct video_device *vdev;
1116 int irq, ret;
1118 reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1119 irq = platform_get_irq(pdev, 0);
1121 if (!reg_res || irq <= 0) {
1122 dev_err(&pdev->dev, "Insufficient VEU platform information.\n");
1123 return -ENODEV;
1126 veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL);
1127 if (!veu)
1128 return -ENOMEM;
1130 veu->is_2h = resource_size(reg_res) == 0x22c;
1132 veu->base = devm_ioremap_resource(&pdev->dev, reg_res);
1133 if (IS_ERR(veu->base))
1134 return PTR_ERR(veu->base);
1136 ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh,
1137 0, "veu", veu);
1138 if (ret < 0)
1139 return ret;
1141 ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev);
1142 if (ret < 0) {
1143 dev_err(&pdev->dev, "Error registering v4l2 device\n");
1144 return ret;
1147 vdev = &veu->vdev;
1149 *vdev = sh_veu_videodev;
1150 vdev->v4l2_dev = &veu->v4l2_dev;
1151 spin_lock_init(&veu->lock);
1152 mutex_init(&veu->fop_lock);
1153 vdev->lock = &veu->fop_lock;
1155 video_set_drvdata(vdev, veu);
1157 veu->dev = &pdev->dev;
1158 veu->vfmt_out = DEFAULT_OUT_VFMT;
1159 veu->vfmt_in = DEFAULT_IN_VFMT;
1161 veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops);
1162 if (IS_ERR(veu->m2m_dev)) {
1163 ret = PTR_ERR(veu->m2m_dev);
1164 v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret);
1165 goto em2minit;
1168 pm_runtime_enable(&pdev->dev);
1169 pm_runtime_resume(&pdev->dev);
1171 ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
1172 pm_runtime_suspend(&pdev->dev);
1173 if (ret < 0)
1174 goto evidreg;
1176 return ret;
1178 evidreg:
1179 pm_runtime_disable(&pdev->dev);
1180 v4l2_m2m_release(veu->m2m_dev);
1181 em2minit:
1182 v4l2_device_unregister(&veu->v4l2_dev);
1183 return ret;
1186 static int sh_veu_remove(struct platform_device *pdev)
1188 struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
1189 struct sh_veu_dev *veu = container_of(v4l2_dev,
1190 struct sh_veu_dev, v4l2_dev);
1192 video_unregister_device(&veu->vdev);
1193 pm_runtime_disable(&pdev->dev);
1194 v4l2_m2m_release(veu->m2m_dev);
1195 v4l2_device_unregister(&veu->v4l2_dev);
1197 return 0;
1200 static struct platform_driver __refdata sh_veu_pdrv = {
1201 .remove = sh_veu_remove,
1202 .driver = {
1203 .name = "sh_veu",
1207 module_platform_driver_probe(sh_veu_pdrv, sh_veu_probe);
1209 MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver");
1210 MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>");
1211 MODULE_LICENSE("GPL v2");