/*
 * Copyright (c) 2013 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio and video interleaver
 */
26 #include "config_components.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/opt.h"
37 typedef struct InterleaveContext
{
44 #define DURATION_LONGEST 0
45 #define DURATION_SHORTEST 1
46 #define DURATION_FIRST 2
48 #define OFFSET(x) offsetof(InterleaveContext, x)
/*
 * Expand to the AVOption table for one filter variant.  filt_name builds the
 * "<name>_options" symbol expected by AVFILTER_DEFINE_CLASS(); flags_ is the
 * AV_OPT_FLAG_* set attached to every option.  The table was missing its
 * mandatory { NULL } sentinel and closing brace — AVOption arrays are scanned
 * until a NULL name, so the terminator is required.
 */
#define DEFINE_OPTIONS(filt_name, flags_)                           \
static const AVOption filt_name##_options[] = {                     \
   { "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
   { "n",         "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
   { "duration", "how to determine the end-of-stream",              \
       OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0,  2, flags_, .unit = "duration" }, \
   { "longest",  "Duration of longest input",  0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST  }, 0, 0, flags_, .unit = "duration" }, \
   { "shortest", "Duration of shortest input", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, 0, 0, flags_, .unit = "duration" }, \
   { "first",    "Duration of first input",    0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST    }, 0, 0, flags_, .unit = "duration" }, \
   { NULL }                                                         \
}
62 static int activate(AVFilterContext
*ctx
)
64 AVFilterLink
*outlink
= ctx
->outputs
[0];
65 InterleaveContext
*s
= ctx
->priv
;
66 int64_t q_pts
, pts
= INT64_MAX
;
67 int i
, nb_eofs
= 0, input_idx
= -1;
71 int nb_inputs_with_frames
= 0;
73 FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink
, ctx
);
75 for (i
= 0; i
< ctx
->nb_inputs
; i
++) {
76 int is_eof
= !!ff_inlink_acknowledge_status(ctx
->inputs
[i
], &status
, &rpts
);
83 if ((nb_eofs
> 0 && s
->duration_mode
== DURATION_SHORTEST
) ||
84 (nb_eofs
== ctx
->nb_inputs
&& s
->duration_mode
== DURATION_LONGEST
) ||
85 (first_eof
&& s
->duration_mode
== DURATION_FIRST
)) {
86 ff_outlink_set_status(outlink
, AVERROR_EOF
, s
->pts
);
90 for (i
= 0; i
< ctx
->nb_inputs
; i
++) {
91 if (!ff_inlink_queued_frames(ctx
->inputs
[i
]))
93 nb_inputs_with_frames
++;
96 if (nb_inputs_with_frames
>= ctx
->nb_inputs
- nb_eofs
) {
97 for (i
= 0; i
< ctx
->nb_inputs
; i
++) {
100 if (ff_inlink_queued_frames(ctx
->inputs
[i
]) == 0)
103 frame
= ff_inlink_peek_frame(ctx
->inputs
[i
], 0);
104 if (frame
->pts
== AV_NOPTS_VALUE
) {
107 av_log(ctx
, AV_LOG_WARNING
,
108 "NOPTS value for input frame cannot be accepted, frame discarded\n");
109 ret
= ff_inlink_consume_frame(ctx
->inputs
[i
], &frame
);
112 av_frame_free(&frame
);
113 return AVERROR_INVALIDDATA
;
116 q_pts
= av_rescale_q(frame
->pts
, ctx
->inputs
[i
]->time_base
, AV_TIME_BASE_Q
);
123 if (input_idx
>= 0) {
127 ret
= ff_inlink_consume_frame(ctx
->inputs
[input_idx
], &frame
);
131 frame
->pts
= s
->pts
= pts
;
132 return ff_filter_frame(outlink
, frame
);
136 for (i
= 0; i
< ctx
->nb_inputs
; i
++) {
137 if (ff_inlink_queued_frames(ctx
->inputs
[i
]))
139 if (ff_outlink_frame_wanted(outlink
) &&
140 !ff_outlink_get_status(ctx
->inputs
[i
])) {
141 ff_inlink_request_frame(ctx
->inputs
[i
]);
146 if (i
== ctx
->nb_inputs
- nb_eofs
&& ff_outlink_frame_wanted(outlink
)) {
147 ff_filter_set_ready(ctx
, 100);
151 return FFERROR_NOT_READY
;
154 static av_cold
int init(AVFilterContext
*ctx
)
156 InterleaveContext
*s
= ctx
->priv
;
157 const AVFilterPad
*outpad
= &ctx
->filter
->outputs
[0];
160 for (i
= 0; i
< s
->nb_inputs
; i
++) {
161 AVFilterPad inpad
= { 0 };
163 inpad
.name
= av_asprintf("input%d", i
);
165 return AVERROR(ENOMEM
);
166 inpad
.type
= outpad
->type
;
168 switch (outpad
->type
) {
169 case AVMEDIA_TYPE_VIDEO
:
170 inpad
.get_buffer
.video
= ff_null_get_video_buffer
; break;
171 case AVMEDIA_TYPE_AUDIO
:
172 inpad
.get_buffer
.audio
= ff_null_get_audio_buffer
; break;
176 if ((ret
= ff_append_inpad_free_name(ctx
, &inpad
)) < 0)
183 static int config_output(AVFilterLink
*outlink
)
185 FilterLink
*l
= ff_filter_link(outlink
);
186 AVFilterContext
*ctx
= outlink
->src
;
187 AVFilterLink
*inlink0
= ctx
->inputs
[0];
190 if (outlink
->type
== AVMEDIA_TYPE_VIDEO
) {
191 outlink
->time_base
= AV_TIME_BASE_Q
;
192 outlink
->w
= inlink0
->w
;
193 outlink
->h
= inlink0
->h
;
194 outlink
->sample_aspect_ratio
= inlink0
->sample_aspect_ratio
;
195 outlink
->format
= inlink0
->format
;
196 l
->frame_rate
= (AVRational
) {1, 0};
197 for (i
= 1; i
< ctx
->nb_inputs
; i
++) {
198 AVFilterLink
*inlink
= ctx
->inputs
[i
];
200 if (outlink
->w
!= inlink
->w
||
201 outlink
->h
!= inlink
->h
||
202 outlink
->sample_aspect_ratio
.num
!= inlink
->sample_aspect_ratio
.num
||
203 outlink
->sample_aspect_ratio
.den
!= inlink
->sample_aspect_ratio
.den
) {
204 av_log(ctx
, AV_LOG_ERROR
, "Parameters for input link %s "
205 "(size %dx%d, SAR %d:%d) do not match the corresponding "
206 "output link parameters (%dx%d, SAR %d:%d)\n",
207 ctx
->input_pads
[i
].name
, inlink
->w
, inlink
->h
,
208 inlink
->sample_aspect_ratio
.num
,
209 inlink
->sample_aspect_ratio
.den
,
210 outlink
->w
, outlink
->h
,
211 outlink
->sample_aspect_ratio
.num
,
212 outlink
->sample_aspect_ratio
.den
);
213 return AVERROR(EINVAL
);
#if CONFIG_INTERLEAVE_FILTER

/* Registration of the video variant.  This copy dropped the output pad's
 * .name initializer, the pad array braces, the .init callback, the closing
 * "};" of the FFFilter and the #endif; restored below.  .init is required:
 * with AVFILTER_FLAG_DYNAMIC_INPUTS the input pads are created there. */

DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(interleave);

static const AVFilterPad interleave_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
};

const FFFilter ff_vf_interleave = {
    .p.name        = "interleave",
    .p.description = NULL_IF_CONFIG_SMALL("Temporally interleave video inputs."),
    .p.priv_class  = &interleave_class,
    .p.flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
    .priv_size     = sizeof(InterleaveContext),
    .init          = init,
    .activate      = activate,
    FILTER_OUTPUTS(interleave_outputs),
};

#endif
246 #if CONFIG_AINTERLEAVE_FILTER
248 DEFINE_OPTIONS(ainterleave
, AV_OPT_FLAG_AUDIO_PARAM
|AV_OPT_FLAG_FILTERING_PARAM
);
249 AVFILTER_DEFINE_CLASS(ainterleave
);
251 static const AVFilterPad ainterleave_outputs
[] = {
254 .type
= AVMEDIA_TYPE_AUDIO
,
255 .config_props
= config_output
,
259 const FFFilter ff_af_ainterleave
= {
260 .p
.name
= "ainterleave",
261 .p
.description
= NULL_IF_CONFIG_SMALL("Temporally interleave audio inputs."),
262 .p
.priv_class
= &ainterleave_class
,
263 .p
.flags
= AVFILTER_FLAG_DYNAMIC_INPUTS
,
264 .priv_size
= sizeof(InterleaveContext
),
266 .activate
= activate
,
267 FILTER_OUTPUTS(ainterleave_outputs
),