/*
 * vsp1_video.c -- R-Car VSP1 Video Node
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
#include <linux/wait.h>

#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vsp1.h"
#include "vsp1_bru.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
#include "vsp1_video.h"
#define VSP1_VIDEO_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define VSP1_VIDEO_DEF_WIDTH		1024
#define VSP1_VIDEO_DEF_HEIGHT		768

#define VSP1_VIDEO_MIN_WIDTH		2U
#define VSP1_VIDEO_MAX_WIDTH		8190U
#define VSP1_VIDEO_MIN_HEIGHT		2U
#define VSP1_VIDEO_MAX_HEIGHT		8190U
/* -----------------------------------------------------------------------------
 * Helper functions
 */

static struct v4l2_subdev *
vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}
static int vsp1_video_verify_format(struct vsp1_video *video)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
	    video->rwpf->format.height != fmt.format.height ||
	    video->rwpf->format.width != fmt.format.width)
		return -EINVAL;

	return 0;
}
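/*
 * Adjust the requested pixel format to the hardware constraints: substitute
 * deprecated RGB formats with their XRGB equivalent, fall back to the default
 * format when the fourcc isn't supported, and align and clamp the width,
 * height and per-plane strides.
 */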
static int __vsp1_video_try_format(struct vsp1_video *video,
				   struct v4l2_pix_format_mplane *pix,
				   const struct vsp1_format_info **fmtinfo)
{
	static const u32 xrgb_formats[][2] = {
		{ V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
		{ V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
		{ V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
		{ V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
	};

	const struct vsp1_format_info *info;
	unsigned int width = pix->width;
	unsigned int height = pix->height;
	unsigned int i;

	/* Backward compatibility: replace deprecated RGB formats by their XRGB
	 * equivalent. This selects the format older userspace applications want
	 * while still exposing the new format.
	 */
	for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
		if (xrgb_formats[i][0] == pix->pixelformat) {
			pix->pixelformat = xrgb_formats[i][1];
			break;
		}
	}

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
	if (!info)
		info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	pix->field = V4L2_FIELD_NONE;
	memset(pix->reserved, 0, sizeof(pix->reserved));

	/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
	width = round_down(width, info->hsub);
	height = round_down(height, info->vsub);

	/* Clamp the width and height. */
	pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
	pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
			    VSP1_VIDEO_MAX_HEIGHT);
	/* Compute and clamp the stride and image size. While not documented in
	 * the datasheet, strides not aligned to a multiple of 128 bytes result
	 * in image corruption.
	 */
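	/*
	 * For example, a 730 pixel wide YUYV plane (16 bits per pixel) needs
	 * at least 730 * 16 / 8 = 1460 bytes per line, which is then rounded
	 * up to the next multiple of 128, i.e. 1536 bytes.
	 */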
	for (i = 0; i < min(info->planes, 2U); ++i) {
		unsigned int hsub = i > 0 ? info->hsub : 1;
		unsigned int vsub = i > 0 ? info->vsub : 1;
		unsigned int align = 128;
		unsigned int bpl;

		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
			      pix->width / hsub * info->bpp[i] / 8,
			      round_down(65535U, align));

		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
					    * pix->height / vsub;
	}

	if (info->planes == 3) {
		/* The second and third planes must have the same stride. */
		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
	}

	pix->num_planes = info->planes;

	if (fmtinfo)
		*fmtinfo = info;

	return 0;
}
/* -----------------------------------------------------------------------------
 * VSP1 Partition Algorithm support
 */
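/*
 * Compute the largest partition width supported by every entity in the
 * pipeline (capped at the frame width) and derive the number of partitions
 * needed to cover the frame. Gen2 hardware doesn't need partitioning and
 * always uses a single partition.
 */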
static void vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	const struct v4l2_mbus_framefmt *format;
	struct vsp1_entity *entity;
	unsigned int div_size;

	format = vsp1_entity_get_pad_format(&pipe->output->entity,
					    pipe->output->entity.config,
					    RWPF_PAD_SINK);
	div_size = format->width;

	/* Gen2 hardware doesn't require image partitioning. */
	if (vsp1->info->gen == 2) {
		pipe->div_size = div_size;
		pipe->partitions = 1;
		return;
	}

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		unsigned int entity_max = VSP1_VIDEO_MAX_WIDTH;

		if (entity->ops->max_width) {
			entity_max = entity->ops->max_width(entity, pipe);
			if (entity_max)
				div_size = min(div_size, entity_max);
		}
	}

	pipe->div_size = div_size;
	pipe->partitions = DIV_ROUND_UP(format->width, div_size);
}
/*
 * vsp1_video_partition - Calculate the active partition output window
 * @div_size: pre-determined maximum partition division size
 * @index: partition index
 *
 * Returns a v4l2_rect describing the partition window.
 */
static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe,
					     unsigned int div_size,
					     unsigned int index)
{
	const struct v4l2_mbus_framefmt *format;
	struct v4l2_rect partition;
	unsigned int modulus;

	format = vsp1_entity_get_pad_format(&pipe->output->entity,
					    pipe->output->entity.config,
					    RWPF_PAD_SINK);

	/* A single partition simply processes the output size in full. */
	if (pipe->partitions <= 1) {
		partition.left = 0;
		partition.top = 0;
		partition.width = format->width;
		partition.height = format->height;

		return partition;
	}

	/* Initialise the partition with sane starting conditions. */
	partition.left = index * div_size;
	partition.top = 0;
	partition.width = div_size;
	partition.height = format->height;

	modulus = format->width % div_size;
	/*
	 * We need to prevent the last partition from being smaller than the
	 * *minimum* width of the hardware capabilities.
	 *
	 * If the modulus is less than half of the partition size,
	 * the penultimate partition is reduced to half, which is added
	 * to the final partition: |1234|1234|1234|12|341|
	 * to prevent this:        |1234|1234|1234|1234|1|.
	 */
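	/*
	 * For example, with div_size 256 and a 1060 pixel wide frame (five
	 * partitions, modulus 36 < 128), partition 3 shrinks to 128 pixels
	 * and partition 4 starts at 896 and covers the remaining 164 pixels.
	 */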
	if (modulus) {
		/*
		 * pipe->partitions is 1 based, whilst index is a 0 based index.
		 * Normalise this locally.
		 */
		unsigned int partitions = pipe->partitions - 1;

		if (modulus < div_size / 2) {
			if (index == partitions - 1) {
				/* Halve the penultimate partition. */
				partition.width = div_size / 2;
			} else if (index == partitions) {
				/* Increase the final partition. */
				partition.width = (div_size / 2) + modulus;
				partition.left -= div_size / 2;
			}
		} else if (index == partitions) {
			partition.width = modulus;
		}
	}

	return partition;
}
/* -----------------------------------------------------------------------------
 * Pipeline Management
 */
/*
 * vsp1_video_complete_buffer - Complete the current buffer
 * @video: the video node
 *
 * This function completes the current buffer by filling its sequence number,
 * time stamp and payload size, and hands it back to the videobuf core.
 *
 * When operating in DU output mode (deep pipeline to the DU through the LIF),
 * the VSP1 needs to constantly supply frames to the display. In that case, if
 * no other buffer is queued, reuse the one that has just been processed instead
 * of handing it back to the videobuf core.
 *
 * Return the next queued buffer or NULL if the queue is empty.
 */
static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *next = NULL;
	struct vsp1_vb2_buffer *done;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	done = list_first_entry(&video->irqqueue,
				struct vsp1_vb2_buffer, queue);

	/* In DU output mode reuse the buffer if the list is singular. */
	if (pipe->lif && list_is_singular(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return done;
	}

	list_del(&done->queue);

	if (!list_empty(&video->irqqueue))
		next = list_first_entry(&video->irqqueue,
					struct vsp1_vb2_buffer, queue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	done->buf.sequence = pipe->sequence;
	done->buf.vb2_buf.timestamp = ktime_get_ns();
	for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
		vb2_set_plane_payload(&done->buf.vb2_buf, i,
				      vb2_plane_size(&done->buf.vb2_buf, i));
	vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return next;
}
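/*
 * Complete the current buffer of a video node at frame end time and, when a
 * new buffer is available, hand its memory to the RPF or WPF and flag the
 * node as ready in the pipeline.
 */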
static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
				 struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video = rwpf->video;
	struct vsp1_vb2_buffer *buf;

	buf = vsp1_video_complete_buffer(video);
	if (buf == NULL)
		return;

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;
}
static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
					      struct vsp1_dl_list *dl)
{
	struct vsp1_entity *entity;

	pipe->partition = vsp1_video_partition(pipe, pipe->div_size,
					       pipe->current_partition);

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, dl,
					       VSP1_ENTITY_PARAMS_PARTITION);
	}
}
static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	struct vsp1_entity *entity;

	if (!pipe->dl)
		pipe->dl = vsp1_dl_list_get(pipe->output->dlm);

	/*
	 * Start with the runtime parameters as the configure operation can
	 * compute/cache information needed when configuring partitions. This
	 * is the case with flipping in the WPF.
	 */
	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, pipe->dl,
					       VSP1_ENTITY_PARAMS_RUNTIME);
	}

	/* Run the first partition */
	pipe->current_partition = 0;
	vsp1_video_pipeline_run_partition(pipe, pipe->dl);

	/* Process consecutive partitions as necessary */
	for (pipe->current_partition = 1;
	     pipe->current_partition < pipe->partitions;
	     pipe->current_partition++) {
		struct vsp1_dl_list *dl;

		/*
		 * Partition configuration operations will utilise
		 * the pipe->current_partition variable to determine
		 * the work they should complete.
		 */
		dl = vsp1_dl_list_get(pipe->output->dlm);

		/*
		 * An incomplete chain will still function, but output only
		 * the partitions that had a dl available. The frame end
		 * interrupt will be marked on the last dl in the chain.
		 */
		if (!dl) {
			dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
			break;
		}

		vsp1_video_pipeline_run_partition(pipe, dl);
		vsp1_dl_list_add_chain(pipe->dl, dl);
	}

	/* Complete, and commit the head display list. */
	vsp1_dl_list_commit(pipe->dl);
	pipe->dl = NULL;

	vsp1_pipeline_run(pipe);
}
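/*
 * Frame end handler, installed as pipe->frame_end. Completes buffers on all
 * video nodes in the pipeline, then either restarts the pipeline when new
 * buffers are ready or leaves it stopped when a stop has been requested.
 */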
static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	enum vsp1_pipeline_state state;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&pipe->irqlock, flags);

	/* Complete buffers on all video nodes. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		vsp1_video_frame_end(pipe, pipe->inputs[i]);
	}

	vsp1_video_frame_end(pipe, pipe->output);

	state = pipe->state;
	pipe->state = VSP1_PIPELINE_STOPPED;

	/* If a stop has been requested, mark the pipeline as stopped and
	 * return. Otherwise restart the pipeline if ready.
	 */
	if (state == VSP1_PIPELINE_STOPPING)
		wake_up(&pipe->wq);
	else if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
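/*
 * Walk a branch of the pipeline from the given input RPF downstream to the
 * output WPF, recording the BRU input pad and the UDS position on the way,
 * and verifying that the branch contains no loop and ends at the output WPF.
 */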
static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
					    struct vsp1_rwpf *input,
					    struct vsp1_rwpf *output)
{
	struct media_entity_enum ent_enum;
	struct vsp1_entity *entity;
	struct media_pad *pad;
	bool bru_found = false;
	int ret;

	ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
	if (ret < 0)
		return ret;

	pad = media_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);

	while (1) {
		if (pad == NULL) {
			ret = -EPIPE;
			goto out;
		}

		/* We've reached a video node, that shouldn't have happened. */
		if (!is_media_entity_v4l2_subdev(pad->entity)) {
			ret = -EPIPE;
			goto out;
		}

		entity = to_vsp1_entity(
			media_entity_to_v4l2_subdev(pad->entity));

		/* A BRU is present in the pipeline, store the BRU input pad
		 * number in the input RPF for use when configuring the RPF.
		 */
		if (entity->type == VSP1_ENTITY_BRU) {
			struct vsp1_bru *bru = to_bru(&entity->subdev);

			bru->inputs[pad->index].rpf = input;
			input->bru_input = pad->index;

			bru_found = true;
		}

		/* We've reached the WPF, we're done. */
		if (entity->type == VSP1_ENTITY_WPF)
			break;

		/* Ensure the branch has no loop. */
		if (media_entity_enum_test_and_set(&ent_enum,
						   &entity->subdev.entity)) {
			ret = -EPIPE;
			goto out;
		}

		/* UDS can't be chained. */
		if (entity->type == VSP1_ENTITY_UDS) {
			if (pipe->uds) {
				ret = -EPIPE;
				goto out;
			}

			pipe->uds = entity;
			pipe->uds_input = bru_found ? pipe->bru
					: &input->entity;
		}

		/* Follow the source link. The link setup operations ensure
		 * that the output fan-out can't be more than one, there is thus
		 * no need to verify here that only a single source link is
		 * activated.
		 */
		pad = &entity->pads[entity->source_pad];
		pad = media_entity_remote_pad(pad);
	}

	/* The last entity must be the output WPF. */
	if (entity != &output->entity)
		ret = -EPIPE;

out:
	media_entity_enum_cleanup(&ent_enum);

	return ret;
}
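/*
 * Build the pipeline by walking the media graph from the video node entity,
 * recording the RPFs, WPF, LIF and BRU it contains, then validate every
 * input branch down to the output WPF.
 */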
static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
				     struct vsp1_video *video)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int i;
	int ret;

	/* Walk the graph to locate the entities and video nodes. */
	ret = media_entity_graph_walk_init(&graph, mdev);
	if (ret)
		return ret;

	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		struct v4l2_subdev *subdev;
		struct vsp1_rwpf *rwpf;
		struct vsp1_entity *e;

		if (!is_media_entity_v4l2_subdev(entity))
			continue;

		subdev = media_entity_to_v4l2_subdev(entity);
		e = to_vsp1_entity(subdev);
		list_add_tail(&e->list_pipe, &pipe->entities);

		if (e->type == VSP1_ENTITY_RPF) {
			rwpf = to_rwpf(subdev);
			pipe->inputs[rwpf->entity.index] = rwpf;
			rwpf->video->pipe_index = ++pipe->num_inputs;
			rwpf->pipe = pipe;
		} else if (e->type == VSP1_ENTITY_WPF) {
			rwpf = to_rwpf(subdev);
			pipe->output = rwpf;
			rwpf->video->pipe_index = 0;
			rwpf->pipe = pipe;
		} else if (e->type == VSP1_ENTITY_LIF) {
			pipe->lif = e;
		} else if (e->type == VSP1_ENTITY_BRU) {
			pipe->bru = e;
		}
	}

	media_entity_graph_walk_cleanup(&graph);

	/* We need one output and at least one input. */
	if (pipe->num_inputs == 0 || !pipe->output)
		return -EPIPE;

	/* Follow links downstream for each input and make sure the graph
	 * contains no loop and that all branches end at the output WPF.
	 */
	for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
						       pipe->output);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
				    struct vsp1_video *video)
{
	vsp1_pipeline_init(pipe);

	pipe->frame_end = vsp1_video_pipeline_frame_end;

	return vsp1_video_pipeline_build(pipe, video);
}
static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe;
	int ret;

	/* Get a pipeline object for the video node. If a pipeline has already
	 * been allocated just increment its reference count and return it.
	 * Otherwise allocate a new pipeline and initialize it, it will be freed
	 * when the last reference is released.
	 */
	if (!video->rwpf->pipe) {
		pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
		if (!pipe)
			return ERR_PTR(-ENOMEM);

		ret = vsp1_video_pipeline_init(pipe, video);
		if (ret < 0) {
			vsp1_pipeline_reset(pipe);
			kfree(pipe);
			return ERR_PTR(ret);
		}
	} else {
		pipe = video->rwpf->pipe;
		kref_get(&pipe->kref);
	}

	return pipe;
}
static void vsp1_video_pipeline_release(struct kref *kref)
{
	struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);

	vsp1_pipeline_reset(pipe);
	kfree(pipe);
}
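/*
 * Release a pipeline reference. The media device graph mutex serialises the
 * release against vsp1_video_pipeline_get(), which is called with the same
 * mutex held.
 */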
static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
{
	struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;

	mutex_lock(&mdev->graph_mutex);
	kref_put(&pipe->kref, vsp1_video_pipeline_release);
	mutex_unlock(&mdev->graph_mutex);
}
/* -----------------------------------------------------------------------------
 * videobuf2 Queue Operations
 */
static int
vsp1_video_queue_setup(struct vb2_queue *vq,
		       unsigned int *nbuffers, unsigned int *nplanes,
		       unsigned int sizes[], struct device *alloc_devs[])
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (*nplanes) {
		if (*nplanes != format->num_planes)
			return -EINVAL;

		for (i = 0; i < *nplanes; i++)
			if (sizes[i] < format->plane_fmt[i].sizeimage)
				return -EINVAL;
		return 0;
	}

	*nplanes = format->num_planes;

	for (i = 0; i < format->num_planes; ++i)
		sizes[i] = format->plane_fmt[i].sizeimage;

	return 0;
}
static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (vb->num_planes < format->num_planes)
		return -EINVAL;

	for (i = 0; i < vb->num_planes; ++i) {
		buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);

		if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
			return -EINVAL;
	}

	for ( ; i < 3; ++i)
		buf->mem.addr[i] = 0;

	return 0;
}
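/*
 * Queue a buffer on the video node. If the IRQ queue was empty, hand the
 * buffer memory to the RPF or WPF immediately and start the pipeline when
 * the queue is streaming and all video nodes have a buffer ready.
 */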
static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	unsigned long flags;
	bool empty;

	spin_lock_irqsave(&video->irqlock, flags);
	empty = list_empty(&video->irqqueue);
	list_add_tail(&buf->queue, &video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);

	if (!empty)
		return;

	spin_lock_irqsave(&pipe->irqlock, flags);

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;

	if (vb2_is_streaming(&video->queue) &&
	    vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
	struct vsp1_entity *entity;

	/* Determine this pipeline's sizes for image partitioning support. */
	vsp1_video_pipeline_setup_partitions(pipe);

	/* Prepare the display list. */
	pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
	if (!pipe->dl)
		return -ENOMEM;

	if (pipe->uds) {
		struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);

		/* If a BRU is present in the pipeline before the UDS, the alpha
		 * component doesn't need to be scaled as the BRU output alpha
		 * value is fixed to 255. Otherwise we need to scale the alpha
		 * component only when available at the input RPF.
		 */
		if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
			uds->scale_alpha = false;
		} else {
			struct vsp1_rwpf *rpf =
				to_rwpf(&pipe->uds_input->subdev);

			uds->scale_alpha = rpf->fmtinfo->alpha;
		}
	}

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		vsp1_entity_route_setup(entity, pipe->dl);

		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, pipe->dl,
					       VSP1_ENTITY_PARAMS_INIT);
	}

	return 0;
}
static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	bool start_pipeline = false;
	unsigned long flags;
	int ret;

	mutex_lock(&pipe->lock);
	if (pipe->stream_count == pipe->num_inputs) {
		ret = vsp1_video_setup_pipeline(pipe);
		if (ret < 0) {
			mutex_unlock(&pipe->lock);
			return ret;
		}

		start_pipeline = true;
	}

	pipe->stream_count++;
	mutex_unlock(&pipe->lock);

	/*
	 * vsp1_pipeline_ready() is not sufficient to establish that all streams
	 * are prepared and the pipeline is configured, as multiple streams
	 * can race through streamon with buffers already queued; Therefore we
	 * don't even attempt to start the pipeline until the last stream has
	 * called through here.
	 */
	if (!start_pipeline)
		return 0;

	spin_lock_irqsave(&pipe->irqlock, flags);
	if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);
	spin_unlock_irqrestore(&pipe->irqlock, flags);

	return 0;
}
static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *buffer;
	unsigned long flags;
	int ret;

	/*
	 * Clear the buffers ready flag to make sure the device won't be started
	 * by a QBUF on the video node on the other side of the pipeline.
	 */
	spin_lock_irqsave(&video->irqlock, flags);
	pipe->buffers_ready &= ~(1 << video->pipe_index);
	spin_unlock_irqrestore(&video->irqlock, flags);

	mutex_lock(&pipe->lock);
	if (--pipe->stream_count == pipe->num_inputs) {
		/* Stop the pipeline. */
		ret = vsp1_pipeline_stop(pipe);
		if (ret == -ETIMEDOUT)
			dev_err(video->vsp1->dev, "pipeline stop timeout\n");

		vsp1_dl_list_put(pipe->dl);
		pipe->dl = NULL;
	}
	mutex_unlock(&pipe->lock);

	media_entity_pipeline_stop(&video->video.entity);
	vsp1_video_pipeline_put(pipe);

	/* Remove all buffers from the IRQ queue. */
	spin_lock_irqsave(&video->irqlock, flags);
	list_for_each_entry(buffer, &video->irqqueue, queue)
		vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);
}
static const struct vb2_ops vsp1_video_queue_qops = {
	.queue_setup = vsp1_video_queue_setup,
	.buf_prepare = vsp1_video_buffer_prepare,
	.buf_queue = vsp1_video_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = vsp1_video_start_streaming,
	.stop_streaming = vsp1_video_stop_streaming,
};
/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */
static int
vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | V4L2_CAP_VIDEO_CAPTURE_MPLANE
			  | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
				 | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
				 | V4L2_CAP_STREAMING;

	strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 dev_name(video->vsp1->dev));

	return 0;
}
static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	mutex_lock(&video->lock);
	format->fmt.pix_mp = video->rwpf->format;
	mutex_unlock(&video->lock);

	return 0;
}
static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
}
static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	const struct vsp1_format_info *info;
	int ret;

	if (format->type != video->queue.type)
		return -EINVAL;

	ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
	if (ret < 0)
		return ret;

	mutex_lock(&video->lock);

	if (vb2_is_busy(&video->queue)) {
		ret = -EBUSY;
		goto done;
	}

	video->rwpf->format = format->fmt.pix_mp;
	video->rwpf->fmtinfo = info;

done:
	mutex_unlock(&video->lock);
	return ret;
}
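/*
 * Start streaming on the video node: take a pipeline reference, start the
 * media pipeline, verify the format against the connected subdev, and hand
 * control to videobuf2. Errors unwind the pipeline start and reference.
 */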
static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	struct media_device *mdev = &video->vsp1->media_dev;
	struct vsp1_pipeline *pipe;
	int ret;

	if (video->queue.owner && video->queue.owner != file->private_data)
		return -EBUSY;

	/* Get a pipeline for the video node and start streaming on it. No link
	 * touching an entity in the pipeline can be activated or deactivated
	 * once streaming is started.
	 */
	mutex_lock(&mdev->graph_mutex);

	pipe = vsp1_video_pipeline_get(video);
	if (IS_ERR(pipe)) {
		mutex_unlock(&mdev->graph_mutex);
		return PTR_ERR(pipe);
	}

	ret = __media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0) {
		mutex_unlock(&mdev->graph_mutex);
		goto err_pipe;
	}

	mutex_unlock(&mdev->graph_mutex);

	/* Verify that the configured format matches the output of the connected
	 * subdev.
	 */
	ret = vsp1_video_verify_format(video);
	if (ret < 0)
		goto err_stop;

	/* Start the queue. */
	ret = vb2_streamon(&video->queue, type);
	if (ret < 0)
		goto err_stop;

	return 0;

err_stop:
	media_entity_pipeline_stop(&video->video.entity);
err_pipe:
	vsp1_video_pipeline_put(pipe);
	return ret;
}
static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
	.vidioc_querycap		= vsp1_video_querycap,
	.vidioc_g_fmt_vid_cap_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_cap_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_cap_mplane	= vsp1_video_try_format,
	.vidioc_g_fmt_vid_out_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_out_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_out_mplane	= vsp1_video_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vsp1_video_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};
/* -----------------------------------------------------------------------------
 * V4L2 File Operations
 */
static int vsp1_video_open(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh;
	int ret = 0;

	vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
	if (vfh == NULL)
		return -ENOMEM;

	v4l2_fh_init(vfh, &video->video);
	v4l2_fh_add(vfh);

	file->private_data = vfh;

	ret = vsp1_device_get(video->vsp1);
	if (ret < 0) {
		v4l2_fh_del(vfh);
		v4l2_fh_exit(vfh);
		kfree(vfh);
	}

	return ret;
}
static int vsp1_video_release(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;

	mutex_lock(&video->lock);
	if (video->queue.owner == vfh) {
		vb2_queue_release(&video->queue);
		video->queue.owner = NULL;
	}
	mutex_unlock(&video->lock);

	vsp1_device_put(video->vsp1);

	v4l2_fh_release(file);

	file->private_data = NULL;

	return 0;
}
static const struct v4l2_file_operations vsp1_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = vsp1_video_open,
	.release = vsp1_video_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};
/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */
struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
				     struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video;
	const char *direction;
	int ret;

	video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
	if (!video)
		return ERR_PTR(-ENOMEM);

	rwpf->video = video;

	video->vsp1 = vsp1;
	video->rwpf = rwpf;

	if (rwpf->entity.type == VSP1_ENTITY_RPF) {
		direction = "input";
		video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		video->video.vfl_dir = VFL_DIR_TX;
	} else {
		direction = "output";
		video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SINK;
		video->video.vfl_dir = VFL_DIR_RX;
	}

	mutex_init(&video->lock);
	spin_lock_init(&video->irqlock);
	INIT_LIST_HEAD(&video->irqqueue);

	/* Initialize the media entity... */
	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ERR_PTR(ret);

	/* ... and the format ... */
	rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
	rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
	rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
	__vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);

	/* ... and the video node... */
	video->video.v4l2_dev = &video->vsp1->v4l2_dev;
	video->video.fops = &vsp1_video_fops;
	snprintf(video->video.name, sizeof(video->video.name), "%s %s",
		 rwpf->entity.subdev.name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &vsp1_video_ioctl_ops;

	video_set_drvdata(&video->video, video);
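	/* ... and the vb2 queue ... */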
	video->queue.type = video->type;
	video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	video->queue.lock = &video->lock;
	video->queue.drv_priv = video;
	video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
	video->queue.ops = &vsp1_video_queue_qops;
	video->queue.mem_ops = &vb2_dma_contig_memops;
	video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	video->queue.dev = video->vsp1->dev;
	ret = vb2_queue_init(&video->queue);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
		goto error;
	}

	/* ... and register the video device. */
	video->video.queue = &video->queue;
	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to register video device\n");
		goto error;
	}

	return video;

error:
	vsp1_video_cleanup(video);
	return ERR_PTR(ret);
}
void vsp1_video_cleanup(struct vsp1_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);

	media_entity_cleanup(&video->video.entity);
}