// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_video.c -- R-Car VSP1 Video Node
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
#include <linux/wait.h>

#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
#include "vsp1_video.h"

#define VSP1_VIDEO_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define VSP1_VIDEO_DEF_WIDTH		1024
#define VSP1_VIDEO_DEF_HEIGHT		768

#define VSP1_VIDEO_MIN_WIDTH		2U
#define VSP1_VIDEO_MAX_WIDTH		8190U
#define VSP1_VIDEO_MIN_HEIGHT		2U
#define VSP1_VIDEO_MAX_HEIGHT		8190U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

static struct v4l2_subdev *
vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

static int vsp1_video_verify_format(struct vsp1_video *video)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
	    video->rwpf->format.height != fmt.format.height ||
	    video->rwpf->format.width != fmt.format.width)
		return -EINVAL;

	return 0;
}

static int __vsp1_video_try_format(struct vsp1_video *video,
				   struct v4l2_pix_format_mplane *pix,
				   const struct vsp1_format_info **fmtinfo)
{
	static const u32 xrgb_formats[][2] = {
		{ V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
		{ V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
		{ V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
		{ V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
	};

	const struct vsp1_format_info *info;
	unsigned int width = pix->width;
	unsigned int height = pix->height;
	unsigned int i;

	/*
	 * Backward compatibility: replace deprecated RGB formats by their XRGB
	 * equivalent. This selects the format older userspace applications want
	 * while still exposing the new format.
	 */
	for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
		if (xrgb_formats[i][0] == pix->pixelformat) {
			pix->pixelformat = xrgb_formats[i][1];
			break;
		}
	}

	/*
	 * Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
	if (info == NULL)
		info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	pix->field = V4L2_FIELD_NONE;

	if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
	    info->fourcc == V4L2_PIX_FMT_HSV32)
		pix->hsv_enc = V4L2_HSV_ENC_256;

	memset(pix->reserved, 0, sizeof(pix->reserved));

	/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
	width = round_down(width, info->hsub);
	height = round_down(height, info->vsub);

	/* Clamp the width and height. */
	pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
	pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
			    VSP1_VIDEO_MAX_HEIGHT);

	/*
	 * Compute and clamp the stride and image size. While not documented in
	 * the datasheet, strides not aligned to a multiple of 128 bytes result
	 * in image corruption.
	 */
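	/*
	 * For example, a 1930 pixel wide plane of a 32 bpp format needs at
	 * least 1930 * 32 / 8 = 7720 bytes per line, which the rounding below
	 * turns into 7808 bytes, the next multiple of 128.
	 */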
	for (i = 0; i < min(info->planes, 2U); ++i) {
		unsigned int hsub = i > 0 ? info->hsub : 1;
		unsigned int vsub = i > 0 ? info->vsub : 1;
		unsigned int align = 128;
		unsigned int bpl;

		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
			      pix->width / hsub * info->bpp[i] / 8,
			      round_down(65535U, align));

		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
					    * pix->height / vsub;
	}

	if (info->planes == 3) {
		/* The second and third planes must have the same stride. */
		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
	}

	pix->num_planes = info->planes;

	if (fmtinfo != NULL)
		*fmtinfo = info;

	return 0;
}

/* -----------------------------------------------------------------------------
 * VSP1 Partition Algorithm support
 */

/**
 * vsp1_video_calculate_partition - Calculate the active partition output window
 *
 * @pipe: the pipeline
 * @partition: partition that will hold the calculated values
 * @div_size: pre-determined maximum partition division size
 * @index: partition index
 */
static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe,
					   struct vsp1_partition *partition,
					   unsigned int div_size,
					   unsigned int index)
{
	const struct v4l2_mbus_framefmt *format;
	struct vsp1_partition_window window;
	unsigned int modulus;

	/*
	 * Partitions are computed on the size before rotation, use the format
	 * at the WPF sink pad.
	 */
	format = vsp1_entity_get_pad_format(&pipe->output->entity,
					    pipe->output->entity.config,
					    RWPF_PAD_SINK);

	/* A single partition simply processes the output size in full. */
	if (pipe->partitions <= 1) {
		window.left = 0;
		window.width = format->width;

		vsp1_pipeline_propagate_partition(pipe, partition, index,
						  &window);
		return;
	}

	/* Initialise the partition with sane starting conditions. */
	window.left = index * div_size;
	window.width = div_size;

	modulus = format->width % div_size;

	/*
	 * We need to prevent the last partition from being smaller than the
	 * *minimum* width of the hardware capabilities.
	 *
	 * If the modulus is less than half of the partition size,
	 * the penultimate partition is reduced to half, which is added
	 * to the final partition: |1234|1234|1234|12|341|
	 * to prevent this:        |1234|1234|1234|1234|1|.
	 */
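	/*
	 * For example, assume a 4100 pixel wide format with a 1024 pixel
	 * division size: five partitions are used and the modulus is 4. As
	 * 4 < 512, the penultimate partition is halved to 512 pixels and the
	 * final partition covers the remaining 512 + 4 = 516 pixels.
	 */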
	if (modulus) {
		/*
		 * pipe->partitions is 1-based, whilst index is a 0-based index.
		 * Normalise this locally.
		 */
		unsigned int partitions = pipe->partitions - 1;

		if (modulus < div_size / 2) {
			if (index == partitions - 1) {
				/* Halve the penultimate partition. */
				window.width = div_size / 2;
			} else if (index == partitions) {
				/* Increase the final partition. */
				window.width = (div_size / 2) + modulus;
				window.left -= div_size / 2;
			}
		} else if (index == partitions) {
			window.width = modulus;
		}
	}

	vsp1_pipeline_propagate_partition(pipe, partition, index, &window);
}

static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	const struct v4l2_mbus_framefmt *format;
	struct vsp1_entity *entity;
	unsigned int div_size;
	unsigned int i;

	/*
	 * Partitions are computed on the size before rotation, use the format
	 * at the WPF sink pad.
	 */
	format = vsp1_entity_get_pad_format(&pipe->output->entity,
					    pipe->output->entity.config,
					    RWPF_PAD_SINK);
	div_size = format->width;

	/*
	 * Only Gen3 hardware requires image partitioning, Gen2 will operate
	 * with a single partition that covers the whole output.
	 */
	if (vsp1->info->gen == 3) {
		list_for_each_entry(entity, &pipe->entities, list_pipe) {
			unsigned int entity_max;

			if (!entity->ops->max_width)
				continue;

			entity_max = entity->ops->max_width(entity, pipe);
			if (entity_max)
				div_size = min(div_size, entity_max);
		}
	}

	pipe->partitions = DIV_ROUND_UP(format->width, div_size);
	pipe->part_table = kcalloc(pipe->partitions, sizeof(*pipe->part_table),
				   GFP_KERNEL);
	if (!pipe->part_table)
		return -ENOMEM;

	for (i = 0; i < pipe->partitions; ++i)
		vsp1_video_calculate_partition(pipe, &pipe->part_table[i],
					       div_size, i);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Management
 */

/*
 * vsp1_video_complete_buffer - Complete the current buffer
 * @video: the video node
 *
 * This function completes the current buffer by filling its sequence number,
 * time stamp and payload size, and hands it back to the videobuf core.
 *
 * When operating in DU output mode (deep pipeline to the DU through the LIF),
 * the VSP1 needs to constantly supply frames to the display. In that case, if
 * no other buffer is queued, reuse the one that has just been processed instead
 * of handing it back to the videobuf core.
 *
 * Return the next queued buffer or NULL if the queue is empty.
 */
static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
	struct vsp1_vb2_buffer *next = NULL;
	struct vsp1_vb2_buffer *done;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	done = list_first_entry(&video->irqqueue,
				struct vsp1_vb2_buffer, queue);

	/* In DU output mode reuse the buffer if the list is singular. */
	if (pipe->lif && list_is_singular(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return done;
	}

	list_del(&done->queue);

	if (!list_empty(&video->irqqueue))
		next = list_first_entry(&video->irqqueue,
					struct vsp1_vb2_buffer, queue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	done->buf.sequence = pipe->sequence;
	done->buf.vb2_buf.timestamp = ktime_get_ns();
	for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
		vb2_set_plane_payload(&done->buf.vb2_buf, i,
				      vb2_plane_size(&done->buf.vb2_buf, i));
	vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return next;
}

static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
				 struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video = rwpf->video;
	struct vsp1_vb2_buffer *buf;

	buf = vsp1_video_complete_buffer(video);
	if (buf == NULL)
		return;

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;
}

static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
					      struct vsp1_dl_list *dl,
					      unsigned int partition)
{
	struct vsp1_dl_body *dlb = vsp1_dl_list_get_body0(dl);
	struct vsp1_entity *entity;

	pipe->partition = &pipe->part_table[partition];

	list_for_each_entry(entity, &pipe->entities, list_pipe)
		vsp1_entity_configure_partition(entity, pipe, dl, dlb);
}

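/*
 * Run a single frame through the pipeline: prepare a display list that carries
 * the cached stream configuration (when the hardware still needs a full
 * setup), the per-frame entity configuration and the first partition, chain
 * one additional display list per extra partition, then commit the whole chain
 * and start the pipeline.
 */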
static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	struct vsp1_entity *entity;
	struct vsp1_dl_body *dlb;
	struct vsp1_dl_list *dl;
	unsigned int partition;

	dl = vsp1_dl_list_get(pipe->output->dlm);

	/*
	 * If the VSP hardware isn't configured yet (which occurs either when
	 * processing the first frame or after a system suspend/resume), add the
	 * cached stream configuration to the display list to perform a full
	 * initialization.
	 */
	if (!pipe->configured)
		vsp1_dl_list_add_body(dl, pipe->stream_config);

	dlb = vsp1_dl_list_get_body0(dl);

	list_for_each_entry(entity, &pipe->entities, list_pipe)
		vsp1_entity_configure_frame(entity, pipe, dl, dlb);

	/* Run the first partition. */
	vsp1_video_pipeline_run_partition(pipe, dl, 0);

	/* Process consecutive partitions as necessary. */
	for (partition = 1; partition < pipe->partitions; ++partition) {
		struct vsp1_dl_list *dl_next;

		dl_next = vsp1_dl_list_get(pipe->output->dlm);

		/*
		 * An incomplete chain will still function, but output only
		 * the partitions that had a dl available. The frame end
		 * interrupt will be marked on the last dl in the chain.
		 */
		if (!dl_next) {
			dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
			break;
		}

		vsp1_video_pipeline_run_partition(pipe, dl_next, partition);
		vsp1_dl_list_add_chain(dl, dl_next);
	}

	/* Complete, and commit the head display list. */
	vsp1_dl_list_commit(dl, false);
	pipe->configured = true;

	vsp1_pipeline_run(pipe);
}

static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe,
					  unsigned int completion)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	enum vsp1_pipeline_state state;
	unsigned long flags;
	unsigned int i;

	/* M2M Pipelines should never call here with an incomplete frame. */
	WARN_ON_ONCE(!(completion & VSP1_DL_FRAME_END_COMPLETED));

	spin_lock_irqsave(&pipe->irqlock, flags);

	/* Complete buffers on all video nodes. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		vsp1_video_frame_end(pipe, pipe->inputs[i]);
	}

	vsp1_video_frame_end(pipe, pipe->output);

	state = pipe->state;
	pipe->state = VSP1_PIPELINE_STOPPED;

	/*
	 * If a stop has been requested, mark the pipeline as stopped and
	 * return. Otherwise restart the pipeline if ready.
	 */
	if (state == VSP1_PIPELINE_STOPPING)
		wake_up(&pipe->wq);
	else if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}

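/*
 * Walk one branch of the pipeline, from an input RPF towards the output WPF,
 * following source links only. The walk records the BRU/BRS input pad used by
 * the RPF, rejects loops as well as chained BRx or UDS instances, and fails
 * with -EPIPE if the branch doesn't terminate at the expected WPF.
 */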
static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
					    struct vsp1_rwpf *input,
					    struct vsp1_rwpf *output)
{
	struct media_entity_enum ent_enum;
	struct vsp1_entity *entity;
	struct media_pad *pad;
	struct vsp1_brx *brx = NULL;
	int ret;

	ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
	if (ret < 0)
		return ret;

	/*
	 * The main data path doesn't include the HGO or HGT, use
	 * vsp1_entity_remote_pad() to traverse the graph.
	 */
	pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);

	while (1) {
		if (pad == NULL) {
			ret = -EPIPE;
			goto out;
		}

		/* We've reached a video node, that shouldn't have happened. */
		if (!is_media_entity_v4l2_subdev(pad->entity)) {
			ret = -EPIPE;
			goto out;
		}

		entity = to_vsp1_entity(
			media_entity_to_v4l2_subdev(pad->entity));

		/*
		 * A BRU or BRS is present in the pipeline, store its input pad
		 * number in the input RPF for use when configuring the RPF.
		 */
		if (entity->type == VSP1_ENTITY_BRU ||
		    entity->type == VSP1_ENTITY_BRS) {
			/* BRU and BRS can't be chained. */
			if (brx) {
				ret = -EPIPE;
				goto out;
			}

			brx = to_brx(&entity->subdev);
			brx->inputs[pad->index].rpf = input;
			input->brx_input = pad->index;
		}

		/* We've reached the WPF, we're done. */
		if (entity->type == VSP1_ENTITY_WPF)
			break;

		/* Ensure the branch has no loop. */
		if (media_entity_enum_test_and_set(&ent_enum,
						   &entity->subdev.entity)) {
			ret = -EPIPE;
			goto out;
		}

		/* UDS can't be chained. */
		if (entity->type == VSP1_ENTITY_UDS) {
			if (pipe->uds) {
				ret = -EPIPE;
				goto out;
			}

			pipe->uds = entity;
			pipe->uds_input = brx ? &brx->entity : &input->entity;
		}

		/* Follow the source link, ignoring any HGO or HGT. */
		pad = &entity->pads[entity->source_pad];
		pad = vsp1_entity_remote_pad(pad);
	}

	/* The last entity must be the output WPF. */
	if (entity != &output->entity)
		ret = -EPIPE;

out:
	media_entity_enum_cleanup(&ent_enum);

	return ret;
}

static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
				     struct vsp1_video *video)
{
	struct media_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int i;
	int ret;

	/* Walk the graph to locate the entities and video nodes. */
	ret = media_graph_walk_init(&graph, mdev);
	if (ret)
		return ret;

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct v4l2_subdev *subdev;
		struct vsp1_rwpf *rwpf;
		struct vsp1_entity *e;

		if (!is_media_entity_v4l2_subdev(entity))
			continue;

		subdev = media_entity_to_v4l2_subdev(entity);
		e = to_vsp1_entity(subdev);
		list_add_tail(&e->list_pipe, &pipe->entities);
		e->pipe = pipe;

		switch (e->type) {
		case VSP1_ENTITY_RPF:
			rwpf = to_rwpf(subdev);
			pipe->inputs[rwpf->entity.index] = rwpf;
			rwpf->video->pipe_index = ++pipe->num_inputs;
			break;

		case VSP1_ENTITY_WPF:
			rwpf = to_rwpf(subdev);
			pipe->output = rwpf;
			rwpf->video->pipe_index = 0;
			break;

		case VSP1_ENTITY_LIF:
			pipe->lif = e;
			break;

		case VSP1_ENTITY_BRU:
		case VSP1_ENTITY_BRS:
			pipe->brx = e;
			break;

		case VSP1_ENTITY_HGO:
			pipe->hgo = e;
			break;

		case VSP1_ENTITY_HGT:
			pipe->hgt = e;
			break;

		default:
			break;
		}
	}

	media_graph_walk_cleanup(&graph);

	/* We need one output and at least one input. */
	if (pipe->num_inputs == 0 || !pipe->output)
		return -EPIPE;

	/*
	 * Follow links downstream for each input and make sure the graph
	 * contains no loop and that all branches end at the output WPF.
	 */
	for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
						       pipe->output);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
				    struct vsp1_video *video)
{
	vsp1_pipeline_init(pipe);

	pipe->frame_end = vsp1_video_pipeline_frame_end;

	return vsp1_video_pipeline_build(pipe, video);
}

static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe;
	int ret;

	/*
	 * Get a pipeline object for the video node. If a pipeline has already
	 * been allocated just increment its reference count and return it.
	 * Otherwise allocate a new pipeline and initialize it, it will be freed
	 * when the last reference is released.
	 */
	if (!video->rwpf->entity.pipe) {
		pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
		if (!pipe)
			return ERR_PTR(-ENOMEM);

		ret = vsp1_video_pipeline_init(pipe, video);
		if (ret < 0) {
			vsp1_pipeline_reset(pipe);
			kfree(pipe);
			return ERR_PTR(ret);
		}
	} else {
		pipe = video->rwpf->entity.pipe;
		kref_get(&pipe->kref);
	}

	return pipe;
}

static void vsp1_video_pipeline_release(struct kref *kref)
{
	struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);

	vsp1_pipeline_reset(pipe);
	kfree(pipe);
}

static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
{
	struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;

	mutex_lock(&mdev->graph_mutex);
	kref_put(&pipe->kref, vsp1_video_pipeline_release);
	mutex_unlock(&mdev->graph_mutex);
}

/* -----------------------------------------------------------------------------
 * videobuf2 Queue Operations
 */

static int
vsp1_video_queue_setup(struct vb2_queue *vq,
		       unsigned int *nbuffers, unsigned int *nplanes,
		       unsigned int sizes[], struct device *alloc_devs[])
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (*nplanes) {
		if (*nplanes != format->num_planes)
			return -EINVAL;

		for (i = 0; i < *nplanes; i++)
			if (sizes[i] < format->plane_fmt[i].sizeimage)
				return -EINVAL;
		return 0;
	}

	*nplanes = format->num_planes;

	for (i = 0; i < format->num_planes; ++i)
		sizes[i] = format->plane_fmt[i].sizeimage;

	return 0;
}

static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (vb->num_planes < format->num_planes)
		return -EINVAL;

	for (i = 0; i < vb->num_planes; ++i) {
		buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);

		if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
			return -EINVAL;
	}

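	/* Zero the plane addresses that are not used by the format. */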
	for ( ; i < 3; ++i)
		buf->mem.addr[i] = 0;

	return 0;
}

static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	unsigned long flags;
	bool empty;

	spin_lock_irqsave(&video->irqlock, flags);
	empty = list_empty(&video->irqqueue);
	list_add_tail(&buf->queue, &video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);

	if (!empty)
		return;

	spin_lock_irqsave(&pipe->irqlock, flags);

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;

	if (vb2_is_streaming(&video->queue) &&
	    vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}

static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
	struct vsp1_entity *entity;
	int ret;

	/* Determine this pipeline's sizes for image partitioning support. */
	ret = vsp1_video_pipeline_setup_partitions(pipe);
	if (ret < 0)
		return ret;

	if (pipe->uds) {
		struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);

		/*
		 * If a BRU or BRS is present in the pipeline before the UDS,
		 * the alpha component doesn't need to be scaled as the BRU and
		 * BRS output alpha value is fixed to 255. Otherwise we need to
		 * scale the alpha component only when available at the input
		 * RPF.
		 */
		if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
		    pipe->uds_input->type == VSP1_ENTITY_BRS) {
			uds->scale_alpha = false;
		} else {
			struct vsp1_rwpf *rpf =
				to_rwpf(&pipe->uds_input->subdev);

			uds->scale_alpha = rpf->fmtinfo->alpha;
		}
	}

	/*
	 * Compute and cache the stream configuration into a body. The cached
	 * body will be added to the display list by vsp1_video_pipeline_run()
	 * whenever the pipeline needs to be fully reconfigured.
	 */
	pipe->stream_config = vsp1_dlm_dl_body_get(pipe->output->dlm);
	if (!pipe->stream_config)
		return -ENOMEM;

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		vsp1_entity_route_setup(entity, pipe, pipe->stream_config);
		vsp1_entity_configure_stream(entity, pipe, pipe->stream_config);
	}

	return 0;
}

static void vsp1_video_release_buffers(struct vsp1_video *video)
{
	struct vsp1_vb2_buffer *buffer;
	unsigned long flags;

	/* Remove all buffers from the IRQ queue. */
	spin_lock_irqsave(&video->irqlock, flags);
	list_for_each_entry(buffer, &video->irqqueue, queue)
		vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);
}

static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
{
	lockdep_assert_held(&pipe->lock);

	/* Release any cached configuration from our output video. */
	vsp1_dl_body_put(pipe->stream_config);
	pipe->stream_config = NULL;
	pipe->configured = false;

	/* Release our partition table allocation. */
	kfree(pipe->part_table);
	pipe->part_table = NULL;
}

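/*
 * Every video node connected to the pipeline (one per input RPF plus the
 * output WPF) goes through this handler; the pipeline is only set up by the
 * last of them, and started once buffers are ready on all nodes.
 */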
static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
	bool start_pipeline = false;
	unsigned long flags;
	int ret;

	mutex_lock(&pipe->lock);
	if (pipe->stream_count == pipe->num_inputs) {
		ret = vsp1_video_setup_pipeline(pipe);
		if (ret < 0) {
			vsp1_video_release_buffers(video);
			vsp1_video_cleanup_pipeline(pipe);
			mutex_unlock(&pipe->lock);
			return ret;
		}

		start_pipeline = true;
	}

	pipe->stream_count++;
	mutex_unlock(&pipe->lock);

	/*
	 * vsp1_pipeline_ready() is not sufficient to establish that all streams
	 * are prepared and the pipeline is configured, as multiple streams
	 * can race through streamon with buffers already queued; therefore we
	 * don't even attempt to start the pipeline until the last stream has
	 * called through here.
	 */
	if (!start_pipeline)
		return 0;

	spin_lock_irqsave(&pipe->irqlock, flags);
	if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);
	spin_unlock_irqrestore(&pipe->irqlock, flags);

	return 0;
}

static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
	unsigned long flags;
	int ret;

	/*
	 * Clear the buffers ready flag to make sure the device won't be started
	 * by a QBUF on the video node on the other side of the pipeline.
	 */
	spin_lock_irqsave(&video->irqlock, flags);
	pipe->buffers_ready &= ~(1 << video->pipe_index);
	spin_unlock_irqrestore(&video->irqlock, flags);

	mutex_lock(&pipe->lock);
	if (--pipe->stream_count == pipe->num_inputs) {
		/* Stop the pipeline. */
		ret = vsp1_pipeline_stop(pipe);
		if (ret == -ETIMEDOUT)
			dev_err(video->vsp1->dev, "pipeline stop timeout\n");

		vsp1_video_cleanup_pipeline(pipe);
	}
	mutex_unlock(&pipe->lock);

	media_pipeline_stop(&video->video.entity);
	vsp1_video_release_buffers(video);
	vsp1_video_pipeline_put(pipe);
}

static const struct vb2_ops vsp1_video_queue_qops = {
	.queue_setup = vsp1_video_queue_setup,
	.buf_prepare = vsp1_video_buffer_prepare,
	.buf_queue = vsp1_video_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = vsp1_video_start_streaming,
	.stop_streaming = vsp1_video_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | V4L2_CAP_VIDEO_CAPTURE_MPLANE
			  | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
				 | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
				 | V4L2_CAP_STREAMING;

	strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 dev_name(video->vsp1->dev));

	return 0;
}

static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	mutex_lock(&video->lock);
	format->fmt.pix_mp = video->rwpf->format;
	mutex_unlock(&video->lock);

	return 0;
}

static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
}

static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	const struct vsp1_format_info *info;
	int ret;

	if (format->type != video->queue.type)
		return -EINVAL;

	ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
	if (ret < 0)
		return ret;

	mutex_lock(&video->lock);

	if (vb2_is_busy(&video->queue)) {
		ret = -EBUSY;
		goto done;
	}

	video->rwpf->format = format->fmt.pix_mp;
	video->rwpf->fmtinfo = info;

done:
	mutex_unlock(&video->lock);
	return ret;
}

static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	struct media_device *mdev = &video->vsp1->media_dev;
	struct vsp1_pipeline *pipe;
	int ret;

	if (video->queue.owner && video->queue.owner != file->private_data)
		return -EBUSY;

	/*
	 * Get a pipeline for the video node and start streaming on it. No link
	 * touching an entity in the pipeline can be activated or deactivated
	 * once streaming is started.
	 */
	mutex_lock(&mdev->graph_mutex);

	pipe = vsp1_video_pipeline_get(video);
	if (IS_ERR(pipe)) {
		mutex_unlock(&mdev->graph_mutex);
		return PTR_ERR(pipe);
	}

	ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0) {
		mutex_unlock(&mdev->graph_mutex);
		goto err_pipe;
	}

	mutex_unlock(&mdev->graph_mutex);

	/*
	 * Verify that the configured format matches the output of the connected
	 * subdev.
	 */
	ret = vsp1_video_verify_format(video);
	if (ret < 0)
		goto err_stop;

	/* Start the queue. */
	ret = vb2_streamon(&video->queue, type);
	if (ret < 0)
		goto err_stop;

	return 0;

err_stop:
	media_pipeline_stop(&video->video.entity);
err_pipe:
	vsp1_video_pipeline_put(pipe);
	return ret;
}

static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
	.vidioc_querycap		= vsp1_video_querycap,
	.vidioc_g_fmt_vid_cap_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_cap_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_cap_mplane	= vsp1_video_try_format,
	.vidioc_g_fmt_vid_out_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_out_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_out_mplane	= vsp1_video_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vsp1_video_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 File Operations
 */

static int vsp1_video_open(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh;
	int ret = 0;

	vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
	if (vfh == NULL)
		return -ENOMEM;

	v4l2_fh_init(vfh, &video->video);
	v4l2_fh_add(vfh);

	file->private_data = vfh;

	ret = vsp1_device_get(video->vsp1);
	if (ret < 0) {
		v4l2_fh_del(vfh);
		v4l2_fh_exit(vfh);
		kfree(vfh);
	}

	return ret;
}

static int vsp1_video_release(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;

	mutex_lock(&video->lock);
	if (video->queue.owner == vfh) {
		vb2_queue_release(&video->queue);
		video->queue.owner = NULL;
	}
	mutex_unlock(&video->lock);

	vsp1_device_put(video->vsp1);

	v4l2_fh_release(file);

	file->private_data = NULL;

	return 0;
}

static const struct v4l2_file_operations vsp1_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = vsp1_video_open,
	.release = vsp1_video_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Suspend and Resume
 */

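/*
 * Suspend parks every running pipeline in the stopping state and waits for the
 * frame end interrupt to complete the stop; resume clears pipe->configured so
 * that the cached stream configuration is reloaded before restarting the
 * pipelines that are ready.
 */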
void vsp1_video_suspend(struct vsp1_device *vsp1)
{
	unsigned long flags;
	unsigned int i;
	int ret;

	/*
	 * To avoid increasing the system suspend time needlessly, loop over the
	 * pipelines twice, first to set them all to the stopping state, and
	 * then to wait for the stop to complete.
	 */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		struct vsp1_pipeline *pipe;

		if (wpf == NULL)
			continue;

		pipe = wpf->entity.pipe;
		if (pipe == NULL)
			continue;

		spin_lock_irqsave(&pipe->irqlock, flags);
		if (pipe->state == VSP1_PIPELINE_RUNNING)
			pipe->state = VSP1_PIPELINE_STOPPING;
		spin_unlock_irqrestore(&pipe->irqlock, flags);
	}

	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		struct vsp1_pipeline *pipe;

		if (wpf == NULL)
			continue;

		pipe = wpf->entity.pipe;
		if (pipe == NULL)
			continue;

		ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
					 msecs_to_jiffies(500));
		if (ret == 0)
			dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
				 wpf->entity.index);
	}
}

void vsp1_video_resume(struct vsp1_device *vsp1)
{
	unsigned long flags;
	unsigned int i;

	/* Resume all running pipelines. */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		struct vsp1_pipeline *pipe;

		if (wpf == NULL)
			continue;

		pipe = wpf->entity.pipe;
		if (pipe == NULL)
			continue;

		/*
		 * The hardware may have been reset during a suspend and will
		 * need a full reconfiguration.
		 */
		pipe->configured = false;

		spin_lock_irqsave(&pipe->irqlock, flags);
		if (vsp1_pipeline_ready(pipe))
			vsp1_video_pipeline_run(pipe);
		spin_unlock_irqrestore(&pipe->irqlock, flags);
	}
}

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

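/*
 * Each RPF is exposed through an "input" video node of the V4L2 output type
 * (userspace queues the buffers that feed the pipeline), while each WPF is
 * exposed through an "output" video node of the V4L2 capture type (userspace
 * dequeues the processed frames).
 */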
struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
				     struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video;
	const char *direction;
	int ret;

	video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
	if (!video)
		return ERR_PTR(-ENOMEM);

	rwpf->video = video;

	video->vsp1 = vsp1;
	video->rwpf = rwpf;

	if (rwpf->entity.type == VSP1_ENTITY_RPF) {
		direction = "input";
		video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		video->video.vfl_dir = VFL_DIR_TX;
	} else {
		direction = "output";
		video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SINK;
		video->video.vfl_dir = VFL_DIR_RX;
	}

	mutex_init(&video->lock);
	spin_lock_init(&video->irqlock);
	INIT_LIST_HEAD(&video->irqqueue);

	/* Initialize the media entity... */
	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ERR_PTR(ret);

	/* ... and the format ... */
	rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
	rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
	rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
	__vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);

	/* ... and the video node... */
	video->video.v4l2_dev = &video->vsp1->v4l2_dev;
	video->video.fops = &vsp1_video_fops;
	snprintf(video->video.name, sizeof(video->video.name), "%s %s",
		 rwpf->entity.subdev.name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &vsp1_video_ioctl_ops;

	video_set_drvdata(&video->video, video);

	video->queue.type = video->type;
	video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	video->queue.lock = &video->lock;
	video->queue.drv_priv = video;
	video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
	video->queue.ops = &vsp1_video_queue_qops;
	video->queue.mem_ops = &vb2_dma_contig_memops;
	video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	video->queue.dev = video->vsp1->bus_master;
	ret = vb2_queue_init(&video->queue);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
		goto error;
	}

	/* ... and register the video device. */
	video->video.queue = &video->queue;
	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to register video device\n");
		goto error;
	}

	return video;

error:
	vsp1_video_cleanup(video);
	return ERR_PTR(ret);
}

void vsp1_video_cleanup(struct vsp1_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);

	media_entity_cleanup(&video->video.entity);
}