/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <float.h>
#include <math.h>
#include <stdint.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"
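/*
 * Private context shared by the trim (video) and atrim (audio) filters.
 */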
typedef struct TrimContext {
    const AVClass *class;

    /*
     * AVOptions
     */
    double duration;
    double start_time, end_time;
    int64_t start_frame, end_frame;
    /*
     * in the link timebase for video,
     * in 1/samplerate for audio
     */
    int64_t start_pts, end_pts;
    int64_t start_sample, end_sample;

    /*
     * number of video frames that arrived on this filter so far
     */
    int64_t nb_frames;
    /*
     * number of audio samples that arrived on this filter so far
     */
    int64_t nb_samples;
    /*
     * timestamp of the first frame in the output, in the timebase units
     */
    int64_t first_pts;
    /*
     * duration in the timebase units
     */
    int64_t duration_tb;

    int64_t next_pts;

    int eof;
    int got_output;
} TrimContext;
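/* Reset per-instance state; the timestamp of the first output frame is not
 * known until the first frame passes through the filter. */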
static int init(AVFilterContext *ctx)
{
    TrimContext *s = ctx->priv;

    s->first_pts = AV_NOPTS_VALUE;

    return 0;
}
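/* Convert the start/end/duration options given in seconds into the units used
 * internally: the link timebase for video, 1/samplerate for audio. */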
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };

    if (s->start_time != DBL_MAX) {
        int64_t start_pts = lrintf(s->start_time / av_q2d(tb));
        if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
            s->start_pts = start_pts;
    }
    if (s->end_time != DBL_MAX) {
        int64_t end_pts = lrintf(s->end_time / av_q2d(tb));
        if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
            s->end_pts = end_pts;
    }
    if (s->duration)
        s->duration_tb = lrintf(s->duration / av_q2d(tb));

    return 0;
}
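/* Keep requesting frames from the input until one of them produces output or
 * the end of the trimmed section has been reached. */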
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TrimContext       *s = ctx->priv;
    int ret;

    s->got_output = 0;
    while (!s->got_output) {
        if (s->eof)
            return AVERROR_EOF;

        ret = ff_request_frame(ctx->inputs[0]);
        if (ret < 0)
            return ret;
    }

    return 0;
}
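/*
 * Options shared by trim and atrim. The section to keep may be given as
 * timestamps in seconds (start/end), as timestamps in the internal units
 * (start_pts/end_pts), or bounded by a maximum output duration; e.g. a
 * filter string such as "trim=start=60:end=120" keeps the section between
 * 60 and 120 seconds of the input.
 */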
#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS \
    { "start", "Timestamp in seconds of the first frame that " \
        "should be passed", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "end", "Timestamp in seconds of the first frame that " \
        "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "start_pts", "Timestamp of the first frame that should be " \
        "passed", OFFSET(start_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end_pts", "Timestamp of the first frame that should be " \
        "dropped again", OFFSET(end_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "duration", "Maximum duration of the output in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },
#if CONFIG_TRIM_FILTER
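/* Video path: decide for each incoming frame whether it falls inside the
 * selected section. Frames before the start point are dropped, a frame past
 * the end point sets the EOF flag, everything in between is passed through
 * unchanged. */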
static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
        drop = 1;
        if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
            drop = 0;
        if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts >= s->start_pts)
            drop = 0;
        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
        s->first_pts = frame->pts;

    if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
        drop = 1;

        if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
            drop = 0;
        if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts < s->end_pts)
            drop = 0;
        if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
            frame->pts - s->first_pts < s->duration_tb)
            drop = 0;

        if (drop) {
            s->eof = 1;
            goto drop;
        }
    }

    s->nb_frames++;
    s->got_output = 1;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_frames++;
    av_frame_free(&frame);
    return 0;
}
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
static const AVOption trim_options[] = {
    COMMON_OPTS
    { "start_frame", "Number of the first frame that should be passed "
        "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
    { "end_frame", "Number of the first frame that should be dropped "
        "again", OFFSET(end_frame), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
    { NULL },
};
#undef FLAGS
static const AVClass trim_class = {
    .class_name = "trim",
    .item_name  = av_default_item_name,
    .option     = trim_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
static const AVFilterPad trim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = trim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
static const AVFilterPad trim_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};
AVFilter ff_vf_trim = {
    .name        = "trim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),

    .init        = init,

    .priv_size   = sizeof(TrimContext),
    .priv_class  = &trim_class,

    .inputs      = trim_inputs,
    .outputs     = trim_outputs,
};
#endif // CONFIG_TRIM_FILTER
#if CONFIG_ATRIM_FILTER
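/* Audio path: works like the video filter, but with sample-level granularity.
 * When a section boundary falls inside a frame, only the samples within the
 * selected range are copied into a new frame (or the frame is truncated). */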
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int64_t start_sample, end_sample = frame->nb_samples;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop       = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = 1;
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample   = FFMAX(0, start_sample);
    end_sample     = FFMIN(frame->nb_samples, end_sample);
    av_assert0(start_sample < end_sample);

    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, av_get_channel_layout_nb_channels(frame->channel_layout),
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    s->got_output = 1;
    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM
static const AVOption atrim_options[] = {
    COMMON_OPTS
    { "start_sample", "Number of the first audio sample that should be "
        "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
    { "end_sample", "Number of the first audio sample that should be "
        "dropped again", OFFSET(end_sample), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
    { NULL },
};
#undef FLAGS
static const AVClass atrim_class = {
    .class_name = "atrim",
    .item_name  = av_default_item_name,
    .option     = atrim_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
static const AVFilterPad atrim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = atrim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
static const AVFilterPad atrim_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
    { NULL }
};
AVFilter ff_af_atrim = {
    .name        = "atrim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),

    .init        = init,

    .priv_size   = sizeof(TrimContext),
    .priv_class  = &atrim_class,

    .inputs      = atrim_inputs,
    .outputs     = atrim_outputs,
};
#endif // CONFIG_ATRIM_FILTER