// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

#define XVIP_DMA_DEF_FORMAT     V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH      1920
#define XVIP_DMA_DEF_HEIGHT     1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH      1U
#define XVIP_DMA_MAX_WIDTH      65535U
#define XVIP_DMA_MIN_HEIGHT     1U
#define XVIP_DMA_MAX_HEIGHT     8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
        struct media_pad *remote;

        remote = media_entity_remote_pad(local);
        if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
                return NULL;

        if (pad)
                *pad = remote->index;

        return media_entity_to_v4l2_subdev(remote->entity);
}

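/*
 * Check that the active format on the connected subdev source pad matches the
 * format currently configured on the DMA video node. Returns -EPIPE if no
 * subdev is connected and -EINVAL on a mismatch.
 */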
static int xvip_dma_verify_format(struct xvip_dma *dma)
{
        struct v4l2_subdev_format fmt;
        struct v4l2_subdev *subdev;
        int ret;

        subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
        if (subdev == NULL)
                return -EPIPE;

        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
        if (ret < 0)
                return ret == -ENOIOCTLCMD ? -EINVAL : ret;

        if (dma->fmtinfo->code != fmt.format.code ||
            dma->format.height != fmt.format.height ||
            dma->format.width != fmt.format.width ||
            dma->format.colorspace != fmt.format.colorspace)
                return -EINVAL;

        return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and start
 * or stop all of them.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
        struct xvip_dma *dma = pipe->output;
        struct media_entity *entity;
        struct media_pad *pad;
        struct v4l2_subdev *subdev;
        int ret;

        entity = &dma->video.entity;
        while (1) {
                pad = &entity->pads[0];
                if (!(pad->flags & MEDIA_PAD_FL_SINK))
                        break;

                pad = media_entity_remote_pad(pad);
                if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
                        break;

                entity = pad->entity;
                subdev = media_entity_to_v4l2_subdev(entity);

                ret = v4l2_subdev_call(subdev, video, s_stream, start);
                if (start && ret < 0 && ret != -ENOIOCTLCMD)
                        return ret;
        }

        return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected at its input and
 * output. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
        int ret = 0;

        mutex_lock(&pipe->lock);

        if (on) {
                if (pipe->stream_count == pipe->num_dmas - 1) {
                        ret = xvip_pipeline_start_stop(pipe, true);
                        if (ret < 0)
                                goto done;
                }
                pipe->stream_count++;
        } else {
                if (--pipe->stream_count == 0)
                        xvip_pipeline_start_stop(pipe, false);
        }

done:
        mutex_unlock(&pipe->lock);
        return ret;
}

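/*
 * Walk the media graph from @start and record the V4L2 I/O nodes that belong
 * to the pipeline. A valid pipeline has exactly one output DMA engine and at
 * most one input DMA engine.
 */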
static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
                                  struct xvip_dma *start)
{
        struct media_graph graph;
        struct media_entity *entity = &start->video.entity;
        struct media_device *mdev = entity->graph_obj.mdev;
        unsigned int num_inputs = 0;
        unsigned int num_outputs = 0;
        int ret;

        mutex_lock(&mdev->graph_mutex);

        /* Walk the graph to locate the video nodes. */
        ret = media_graph_walk_init(&graph, mdev);
        if (ret) {
                mutex_unlock(&mdev->graph_mutex);
                return ret;
        }

        media_graph_walk_start(&graph, entity);

        while ((entity = media_graph_walk_next(&graph))) {
                struct xvip_dma *dma;

                if (entity->function != MEDIA_ENT_F_IO_V4L)
                        continue;

                dma = to_xvip_dma(media_entity_to_video_device(entity));

                if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
                        pipe->output = dma;
                        num_outputs++;
                } else {
                        num_inputs++;
                }
        }

        mutex_unlock(&mdev->graph_mutex);

        media_graph_walk_cleanup(&graph);

        /* We need exactly one output and zero or one input. */
        if (num_outputs != 1 || num_inputs > 1)
                return -EPIPE;

        pipe->num_dmas = num_inputs + num_outputs;

        return 0;
}

static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
        pipe->num_dmas = 0;
        pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
        mutex_lock(&pipe->lock);

        /* If we're the last user clean up the pipeline. */
        if (--pipe->use_count == 0)
                __xvip_pipeline_cleanup(pipe);

        mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
                                 struct xvip_dma *dma)
{
        int ret;

        mutex_lock(&pipe->lock);

        /* If we're the first user validate and initialize the pipeline. */
        if (pipe->use_count == 0) {
                ret = xvip_pipeline_validate(pipe, dma);
                if (ret < 0) {
                        __xvip_pipeline_cleanup(pipe);
                        goto done;
                }
        }

        pipe->use_count++;
        ret = 0;

done:
        mutex_unlock(&pipe->lock);
        return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
        struct vb2_v4l2_buffer buf;
        struct list_head queue;
        struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)  container_of(vb, struct xvip_dma_buffer, buf)

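/*
 * DMA engine transfer completion callback. Remove the buffer from the queued
 * list, fill in the vb2 buffer metadata and hand it back to videobuf2.
 */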
static void xvip_dma_complete(void *param)
{
        struct xvip_dma_buffer *buf = param;
        struct xvip_dma *dma = buf->dma;

        spin_lock(&dma->queued_lock);
        list_del(&buf->queue);
        spin_unlock(&dma->queued_lock);

        buf->buf.field = V4L2_FIELD_NONE;
        buf->buf.sequence = dma->sequence++;
        buf->buf.vb2_buf.timestamp = ktime_get_ns();
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

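/*
 * vb2 .queue_setup operation: report a single plane sized to hold one full
 * image in the currently configured format.
 */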
static int
xvip_dma_queue_setup(struct vb2_queue *vq,
                     unsigned int *nbuffers, unsigned int *nplanes,
                     unsigned int sizes[], struct device *alloc_devs[])
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);

        /* Make sure the image size is large enough. */
        if (*nplanes)
                return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;

        *nplanes = 1;
        sizes[0] = dma->format.sizeimage;

        return 0;
}

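/* vb2 .buf_prepare operation: associate the buffer with its DMA engine. */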
static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

        buf->dma = dma;

        return 0;
}

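/*
 * vb2 .buf_queue operation: describe the buffer as an interleaved transfer
 * (one chunk per video line, with the inter-chunk gap covering the line
 * padding) and submit it to the DMA engine.
 */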
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
        struct dma_async_tx_descriptor *desc;
        dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
        u32 flags;

        if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
                dma->xt.dir = DMA_DEV_TO_MEM;
                dma->xt.src_sgl = false;
                dma->xt.dst_sgl = true;
                dma->xt.dst_start = addr;
        } else {
                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
                dma->xt.dir = DMA_MEM_TO_DEV;
                dma->xt.src_sgl = true;
                dma->xt.dst_sgl = false;
                dma->xt.src_start = addr;
        }

        dma->xt.frame_size = 1;
        dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
        dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
        dma->xt.numf = dma->format.height;

        desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
        if (!desc) {
                dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                return;
        }
        desc->callback = xvip_dma_complete;
        desc->callback_param = buf;

        spin_lock_irq(&dma->queued_lock);
        list_add_tail(&buf->queue, &dma->queued_bufs);
        spin_unlock_irq(&dma->queued_lock);

        dmaengine_submit(desc);

        if (vb2_is_streaming(&dma->queue))
                dma_async_issue_pending(dma->dma);
}

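/*
 * vb2 .start_streaming operation: lock the media pipeline, validate it, start
 * the DMA engine and then enable the upstream entities.
 */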
static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);
        struct xvip_dma_buffer *buf, *nbuf;
        struct xvip_pipeline *pipe;
        int ret;

        dma->sequence = 0;

        /*
         * Start streaming on the pipeline. No link touching an entity in the
         * pipeline can be activated or deactivated once streaming is started.
         *
         * Use the pipeline object embedded in the first DMA object that starts
         * streaming.
         */
        pipe = dma->video.entity.pipe
             ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

        ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
        if (ret < 0)
                goto error;

        /* Verify that the configured format matches the output of the
         * connected subdev.
         */
        ret = xvip_dma_verify_format(dma);
        if (ret < 0)
                goto error_stop;

        ret = xvip_pipeline_prepare(pipe, dma);
        if (ret < 0)
                goto error_stop;

        /* Start the DMA engine. This must be done before starting the blocks
         * in the pipeline to avoid DMA synchronization issues.
         */
        dma_async_issue_pending(dma->dma);

        /* Start the pipeline. */
        xvip_pipeline_set_stream(pipe, true);

        return 0;

error_stop:
        media_pipeline_stop(&dma->video.entity);

error:
        /* Give back all queued buffers to videobuf2. */
        spin_lock_irq(&dma->queued_lock);
        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
                list_del(&buf->queue);
        }
        spin_unlock_irq(&dma->queued_lock);

        return ret;
}

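/*
 * vb2 .stop_streaming operation: disable the pipeline entities, terminate the
 * DMA engine and return all queued buffers to videobuf2 in the error state.
 */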
static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);
        struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
        struct xvip_dma_buffer *buf, *nbuf;

        /* Stop the pipeline. */
        xvip_pipeline_set_stream(pipe, false);

        /* Stop and reset the DMA engine. */
        dmaengine_terminate_all(dma->dma);

        /* Cleanup the pipeline and mark it as being stopped. */
        xvip_pipeline_cleanup(pipe);
        media_pipeline_stop(&dma->video.entity);

        /* Give back all queued buffers to videobuf2. */
        spin_lock_irq(&dma->queued_lock);
        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&buf->queue);
        }
        spin_unlock_irq(&dma->queued_lock);
}

static const struct vb2_ops xvip_dma_queue_qops = {
        .queue_setup = xvip_dma_queue_setup,
        .buf_prepare = xvip_dma_buffer_prepare,
        .buf_queue = xvip_dma_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = xvip_dma_start_streaming,
        .stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
                            V4L2_CAP_DEVICE_CAPS;

        strscpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
        strscpy(cap->card, dma->video.name, sizeof(cap->card));
        snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%pOFn:%u",
                 dma->xdev->dev->of_node, dma->port);

        return 0;
}

/* FIXME: Without this callback function, some applications are not configured
 * with the correct format, which results in frames in the wrong format.
 * Whether this callback is required is not clearly defined, so it should be
 * clarified through the mailing list.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        if (f->index > 0)
                return -EINVAL;

        f->pixelformat = dma->format.pixelformat;

        return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        format->fmt.pix = dma->format;

        return 0;
}

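/*
 * Adjust the requested pixel format to the closest value supported by the DMA
 * engine, honouring the transfer alignment constraints. If @fmtinfo is not
 * NULL, return the matching format information through it.
 */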
static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
                      const struct xvip_video_format **fmtinfo)
{
        const struct xvip_video_format *info;
        unsigned int min_width;
        unsigned int max_width;
        unsigned int min_bpl;
        unsigned int max_bpl;
        unsigned int width;
        unsigned int align;
        unsigned int bpl;

        /* Retrieve format information and select the default format if the
         * requested format isn't supported.
         */
        info = xvip_get_format_by_fourcc(pix->pixelformat);
        if (IS_ERR(info))
                info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

        pix->pixelformat = info->fourcc;
        pix->field = V4L2_FIELD_NONE;

        /* The transfer alignment requirements are expressed in bytes. Compute
         * the minimum and maximum values, clamp the requested width and convert
         * it back to pixels.
         */
        align = lcm(dma->align, info->bpp);
        min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
        max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
        width = rounddown(pix->width * info->bpp, align);

        pix->width = clamp(width, min_width, max_width) / info->bpp;
        pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
                            XVIP_DMA_MAX_HEIGHT);

        /* Clamp the requested bytes per line value. If the maximum bytes per
         * line value is zero, the module doesn't support user configurable line
         * sizes. Override the requested value with the minimum in that case.
         */
        min_bpl = pix->width * info->bpp;
        max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
        bpl = rounddown(pix->bytesperline, dma->align);

        pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
        pix->sizeimage = pix->bytesperline * pix->height;

        if (fmtinfo)
                *fmtinfo = info;
}

static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
        return 0;
}

static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
        const struct xvip_video_format *info;

        __xvip_dma_try_format(dma, &format->fmt.pix, &info);

        if (vb2_is_busy(&dma->queue))
                return -EBUSY;

        dma->format = format->fmt.pix;
        dma->fmtinfo = info;

        return 0;
}

static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
        .vidioc_querycap        = xvip_dma_querycap,
        .vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
        .vidioc_g_fmt_vid_cap   = xvip_dma_get_format,
        .vidioc_g_fmt_vid_out   = xvip_dma_get_format,
        .vidioc_s_fmt_vid_cap   = xvip_dma_set_format,
        .vidioc_s_fmt_vid_out   = xvip_dma_set_format,
        .vidioc_try_fmt_vid_cap = xvip_dma_try_format,
        .vidioc_try_fmt_vid_out = xvip_dma_try_format,
        .vidioc_reqbufs         = vb2_ioctl_reqbufs,
        .vidioc_querybuf        = vb2_ioctl_querybuf,
        .vidioc_qbuf            = vb2_ioctl_qbuf,
        .vidioc_dqbuf           = vb2_ioctl_dqbuf,
        .vidioc_create_bufs     = vb2_ioctl_create_bufs,
        .vidioc_expbuf          = vb2_ioctl_expbuf,
        .vidioc_streamon        = vb2_ioctl_streamon,
        .vidioc_streamoff       = vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static const struct v4l2_file_operations xvip_dma_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open = v4l2_fh_open,
        .release = vb2_fop_release,
        .poll = vb2_fop_poll,
        .mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */

int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
                  enum v4l2_buf_type type, unsigned int port)
{
        char name[16];
        int ret;

        dma->xdev = xdev;
        dma->port = port;
        mutex_init(&dma->lock);
        mutex_init(&dma->pipe.lock);
        INIT_LIST_HEAD(&dma->queued_bufs);
        spin_lock_init(&dma->queued_lock);

        dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
        dma->format.pixelformat = dma->fmtinfo->fourcc;
        dma->format.colorspace = V4L2_COLORSPACE_SRGB;
        dma->format.field = V4L2_FIELD_NONE;
        dma->format.width = XVIP_DMA_DEF_WIDTH;
        dma->format.height = XVIP_DMA_DEF_HEIGHT;
        dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
        dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

        /* Initialize the media entity... */
        dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

        ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
        if (ret < 0)
                goto error;

        /* ... and the video node... */
        dma->video.fops = &xvip_dma_fops;
        dma->video.v4l2_dev = &xdev->v4l2_dev;
        dma->video.queue = &dma->queue;
        snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
                 xdev->dev->of_node,
                 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
                 port);
        dma->video.vfl_type = VFL_TYPE_VIDEO;
        dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                           ? VFL_DIR_RX : VFL_DIR_TX;
        dma->video.release = video_device_release_empty;
        dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
        dma->video.lock = &dma->lock;
        dma->video.device_caps = V4L2_CAP_STREAMING;
        if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
        else
                dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;

        video_set_drvdata(&dma->video, dma);

        /* ... and the buffers queue... */
        /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
         * V4L2 APIs would be inefficient. Testing on the command line with a
         * 'cat /dev/video?' thus won't be possible, but given that the driver
         * anyway requires a test tool to set up the pipeline before any video
         * stream can be started, requiring a specific V4L2 test tool as well
         * instead of 'cat' isn't really a drawback.
         */
        dma->queue.type = type;
        dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        dma->queue.lock = &dma->lock;
        dma->queue.drv_priv = dma;
        dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
        dma->queue.ops = &xvip_dma_queue_qops;
        dma->queue.mem_ops = &vb2_dma_contig_memops;
        dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
                                   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
        dma->queue.dev = dma->xdev->dev;
        ret = vb2_queue_init(&dma->queue);
        if (ret < 0) {
                dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
                goto error;
        }

        /* ... and the DMA channel. */
        snprintf(name, sizeof(name), "port%u", port);
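        /*
         * The "port%u" channel name is expected to match an entry in the
         * composite device's "dma-names" DT property, one channel per port.
         */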
        dma->dma = dma_request_chan(dma->xdev->dev, name);
        if (IS_ERR(dma->dma)) {
                ret = PTR_ERR(dma->dma);
                if (ret != -EPROBE_DEFER)
                        dev_err(dma->xdev->dev, "no VDMA channel found\n");
                goto error;
        }

        dma->align = 1 << dma->dma->device->copy_align;

        ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1);
        if (ret < 0) {
                dev_err(dma->xdev->dev, "failed to register video device\n");
                goto error;
        }

        return 0;

error:
        xvip_dma_cleanup(dma);
        return ret;
}

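/*
 * Release the resources acquired in xvip_dma_init(). Safe to call on a
 * partially initialized DMA engine, as done from the init error path.
 */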
void xvip_dma_cleanup(struct xvip_dma *dma)
{
        if (video_is_registered(&dma->video))
                video_unregister_device(&dma->video);

        if (!IS_ERR_OR_NULL(dma->dma))
                dma_release_channel(dma->dma);

        media_entity_cleanup(&dma->video.entity);

        mutex_destroy(&dma->lock);
        mutex_destroy(&dma->pipe.lock);
}