libavfilter/trim.c
/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>
#include <math.h>
#include <stdint.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"

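/*
 * This file implements two closely related filters that share one context
 * and most of their code: "trim" picks one continuous section of a video
 * stream and "atrim" does the same for audio; everything outside the
 * selected section is dropped. The section can be delimited by timestamps
 * in seconds (start/end/duration), by raw timestamps (start_pts/end_pts),
 * by frame numbers (start_frame/end_frame, video only) or by sample numbers
 * (start_sample/end_sample, audio only). For example, a filtergraph such as
 * trim=start=10:duration=5 keeps five seconds of video starting at t=10s.
 */
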
typedef struct TrimContext {
    const AVClass *class;

    /*
     * AVOptions
     */
    double duration;
    double start_time, end_time;
    int64_t start_frame, end_frame;
    /*
     * in the link timebase for video,
     * in 1/samplerate for audio
     */
    int64_t start_pts, end_pts;
    int64_t start_sample, end_sample;

    /*
     * number of video frames that arrived on this filter so far
     */
    int64_t nb_frames;
    /*
     * number of audio samples that arrived on this filter so far
     */
    int64_t nb_samples;
    /*
     * timestamp of the first frame in the output, in the timebase units
     */
    int64_t first_pts;
    /*
     * duration in the timebase units
     */
    int64_t duration_tb;

    int64_t next_pts;

    int eof;
    int got_output;
} TrimContext;

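/*
 * Init shared by both filters: mark first_pts as unset so that the first
 * accepted frame/sample defines the start of the output.
 */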
static int init(AVFilterContext *ctx)
{
    TrimContext *s = ctx->priv;

    s->first_pts = AV_NOPTS_VALUE;

    return 0;
}

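/*
 * Convert the second-based options (start, end, duration) into the units
 * used internally: the link timebase for video, 1/sample_rate for audio.
 * When both the time and the pts variant of a limit are given, the one
 * selecting the wider range wins (earlier start, later end).
 */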
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };

    if (s->start_time != DBL_MAX) {
        int64_t start_pts = lrintf(s->start_time / av_q2d(tb));
        if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
            s->start_pts = start_pts;
    }
    if (s->end_time != DBL_MAX) {
        int64_t end_pts = lrintf(s->end_time / av_q2d(tb));
        if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
            s->end_pts = end_pts;
    }
    if (s->duration)
        s->duration_tb = lrintf(s->duration / av_q2d(tb));

    return 0;
}

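/*
 * Output request handler shared by both filters: keep requesting frames
 * from the input until one of them actually produces output or until EOF,
 * so that frames dropped by the trim do not leave the request unanswered.
 */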
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TrimContext *s = ctx->priv;
    int ret;

    s->got_output = 0;
    while (!s->got_output) {
        if (s->eof)
            return AVERROR_EOF;

        ret = ff_request_frame(ctx->inputs[0]);
        if (ret < 0)
            return ret;
    }

    return 0;
}

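/*
 * Option table entries shared between trim and atrim. FLAGS is deliberately
 * not defined here; each filter defines it (video or audio param flag)
 * right before expanding COMMON_OPTS in its own option array.
 */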
#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS \
    { "start", "Timestamp in seconds of the first frame that " \
        "should be passed", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "end", "Timestamp in seconds of the first frame that " \
        "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "start_pts", "Timestamp of the first frame that should be " \
        "passed", OFFSET(start_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end_pts", "Timestamp of the first frame that should be " \
        "dropped again", OFFSET(end_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "duration", "Maximum duration of the output in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },

#if CONFIG_TRIM_FILTER
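/*
 * Video path: decide per frame whether it lies before the start point
 * (drop it and keep counting), inside the selected section (pass it
 * through), or at/after the end point (drop it and set eof so that
 * request_frame() returns AVERROR_EOF from then on). Frames are never
 * split; the decision is made for the frame as a whole.
 */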
static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
        drop = 1;
        if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
            drop = 0;
        if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts >= s->start_pts)
            drop = 0;
        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
        s->first_pts = frame->pts;

    if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
        drop = 1;

        if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
            drop = 0;
        if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts < s->end_pts)
            drop = 0;
        if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
            frame->pts - s->first_pts < s->duration_tb)
            drop = 0;

        if (drop) {
            s->eof = 1;
            goto drop;
        }
    }

    s->nb_frames++;
    s->got_output = 1;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_frames++;
    av_frame_free(&frame);
    return 0;
}

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
static const AVOption trim_options[] = {
    COMMON_OPTS
    { "start_frame", "Number of the first frame that should be passed "
        "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
    { "end_frame", "Number of the first frame that should be dropped "
        "again", OFFSET(end_frame), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
    { NULL },
};
#undef FLAGS

static const AVClass trim_class = {
    .class_name = "trim",
    .item_name  = av_default_item_name,
    .option     = trim_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad trim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = trim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad trim_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_trim = {
    .name        = "trim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),

    .init        = init,

    .priv_size   = sizeof(TrimContext),
    .priv_class  = &trim_class,

    .inputs      = trim_inputs,
    .outputs     = trim_outputs,
};
#endif // CONFIG_TRIM_FILTER

#if CONFIG_ATRIM_FILTER
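/*
 * Audio path: same logic as the video filter, but at sample granularity.
 * A frame that straddles the start or end point is not dropped wholesale;
 * the relevant samples are copied into a new buffer (or the frame is simply
 * shortened in place when only its tail needs to go).
 */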
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    int64_t start_sample, end_sample = frame->nb_samples;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = 1;
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample   = FFMAX(0, start_sample);
    end_sample     = FFMIN(frame->nb_samples, end_sample);
    av_assert0(start_sample < end_sample);

    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, av_get_channel_layout_nb_channels(frame->channel_layout),
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    s->got_output = 1;
    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM
static const AVOption atrim_options[] = {
    COMMON_OPTS
    { "start_sample", "Number of the first audio sample that should be "
        "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
    { "end_sample", "Number of the first audio sample that should be "
        "dropped again", OFFSET(end_sample), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
    { NULL },
};
#undef FLAGS

static const AVClass atrim_class = {
    .class_name = "atrim",
    .item_name  = av_default_item_name,
    .option     = atrim_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad atrim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = atrim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad atrim_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_atrim = {
    .name        = "atrim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),

    .init        = init,

    .priv_size   = sizeof(TrimContext),
    .priv_class  = &atrim_class,

    .inputs      = atrim_inputs,
    .outputs     = atrim_outputs,
};
#endif // CONFIG_ATRIM_FILTER