[FFmpeg-mirror.git] / libavfilter / f_segment.c
blob ece53ae56d50c53e96a6994787489bec605595c6
(page header: the commit shown by the mirror — "avformat/mxfdec: Check edit unit for overflow in mxf_set_current_edit_unit()" — is the repository tip, not a change to this file)
1 /*
2 * This file is part of FFmpeg.
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 #include "config_components.h"
21 #include <stdint.h>
23 #include "libavutil/avstring.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mathematics.h"
26 #include "libavutil/mem.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/parseutils.h"
30 #include "avfilter.h"
31 #include "filters.h"
typedef struct SegmentContext {
    const AVClass *class;

    char *timestamps_str;   ///< "timestamps" option: '|'-separated split times
    char *points_str;       ///< "frames"/"samples" option: '|'-separated split counts
    int use_timestamps;     ///< non-zero when splitting on timestamps rather than counts

    int current_point;      ///< index of the segment currently being output
    int nb_points;          ///< number of split points, including the INT64_MAX sentinel
    int64_t last_pts;       ///< pts of the most recently consumed frame

    int64_t *points;        ///< split points; the last entry is an INT64_MAX sentinel
} SegmentContext;
/**
 * Count the '|'-separated items in item_str.
 *
 * @param item_str list of items separated by '|'; may be NULL
 * @param nb_items set to the number of items (a non-empty string with no
 *                 separator counts as 1); left untouched when item_str is
 *                 NULL
 */
static void count_points(const char *item_str, int *nb_items)
{
    if (!item_str)
        return;

    *nb_items = 1;
    for (const char *p = item_str; *p; p++) {
        if (*p == '|')
            (*nb_items)++;
    }
}
/**
 * Parse a '|'-separated list of split points.
 *
 * An item prefixed with '+' is interpreted relative to the previous
 * item's raw (pre-offset) value.
 *
 * @param ctx       filter context, used for logging and reading options
 * @param item_str  string to parse; modified in place by av_strtok()
 * @param nb_points number of items expected in item_str
 * @param points    output array, must hold at least nb_points entries
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_points(AVFilterContext *ctx, char *item_str, int nb_points, int64_t *points)
{
    SegmentContext *s = ctx->priv;
    char *arg, *p = item_str;
    char *saveptr = NULL;
    int64_t ref, cur = 0;
    int ret = 0;

    for (int i = 0; i < nb_points; i++) {
        if (!(arg = av_strtok(p, "|", &saveptr)))
            return AVERROR(EINVAL);

        /* av_strtok() must be passed NULL after the first call so it
         * keeps scanning the same string */
        p = NULL;

        ref = 0;
        if (*arg == '+') {
            /* relative point: offset from the previous raw value */
            ref = cur;
            arg++;
        }

        if (s->use_timestamps) {
            ret = av_parse_time(&points[i], arg, s->use_timestamps);
        } else {
            if (sscanf(arg, "%"SCNd64, &points[i]) != 1)
                ret = AVERROR(EINVAL);
        }

        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Invalid splits supplied: %s\n", arg);
            return ret;
        }

        /* remember the raw value for the next '+' item, then apply the
         * relative offset */
        cur = points[i];
        points[i] += ref;
    }

    return 0;
}
/**
 * Shared init for the segment/asegment filters: parse the split points
 * and create one output pad per segment.
 *
 * Exactly one of the timestamp option or the frame/sample-count option
 * must be supplied; setting both, or neither, is rejected.
 *
 * @param ctx  filter context
 * @param type media type (video or audio) assigned to every output pad
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int init(AVFilterContext *ctx, enum AVMediaType type)
{
    SegmentContext *s = ctx->priv;
    char *split_str;
    int ret;

    if (s->timestamps_str && s->points_str) {
        av_log(ctx, AV_LOG_ERROR, "Both timestamps and counts supplied.\n");
        return AVERROR(EINVAL);
    } else if (s->timestamps_str) {
        s->use_timestamps = 1;
        split_str = s->timestamps_str;
    } else if (s->points_str) {
        split_str = s->points_str;
    } else {
        av_log(ctx, AV_LOG_ERROR, "Neither timestamps nor durations nor counts supplied.\n");
        return AVERROR(EINVAL);
    }

    count_points(split_str, &s->nb_points);
    /* one extra, open-ended segment after the last split point */
    s->nb_points++;

    s->points = av_calloc(s->nb_points, sizeof(*s->points));
    if (!s->points)
        return AVERROR(ENOMEM);

    ret = parse_points(ctx, split_str, s->nb_points - 1, s->points);
    if (ret < 0)
        return ret;

    /* sentinel: the final segment never finishes on its own */
    s->points[s->nb_points - 1] = INT64_MAX;

    for (int i = 0; i < s->nb_points; i++) {
        AVFilterPad pad = { 0 };

        pad.type = type;
        pad.name = av_asprintf("output%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        /* pad.name ownership is transferred (and freed on failure) */
        if ((ret = ff_append_outpad_free_name(ctx, &pad)) < 0)
            return ret;
    }

    return 0;
}
146 static int config_input(AVFilterLink *inlink)
148 AVFilterContext *ctx = inlink->dst;
149 SegmentContext *s = ctx->priv;
150 AVRational tb = inlink->time_base;
152 if (s->use_timestamps) {
153 for (int i = 0; i < s->nb_points - 1; i++)
154 s->points[i] = av_rescale_q(s->points[i], AV_TIME_BASE_Q, tb);
157 return 0;
160 static int current_segment_finished(AVFilterContext *ctx, AVFrame *frame)
162 SegmentContext *s = ctx->priv;
163 AVFilterLink *inlink = ctx->inputs[0];
164 FilterLink *inl = ff_filter_link(inlink);
165 int ret = 0;
167 if (s->use_timestamps) {
168 ret = frame->pts >= s->points[s->current_point];
169 } else {
170 switch (inlink->type) {
171 case AVMEDIA_TYPE_VIDEO:
172 ret = inl->frame_count_out - 1 >= s->points[s->current_point];
173 break;
174 case AVMEDIA_TYPE_AUDIO:
175 ret = inl->sample_count_out - frame->nb_samples >= s->points[s->current_point];
176 break;
180 return ret;
/**
 * Per-call scheduling for both filter variants.
 *
 * Consumes one frame (video) or at most one segment's worth of samples
 * (audio) from the input, closes every segment the new data has moved
 * past, and forwards the frame on the output pad of the current segment.
 *
 * @return >= 0 on success, a negative AVERROR code on failure;
 *         AVERROR(EINVAL) if input continues past the final segment.
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    FilterLink *inl = ff_filter_link(inlink);
    SegmentContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t max_samples;
    int64_t diff;
    int64_t pts;

    /* forward status changes from every still-open output back to the input */
    for (int i = s->current_point; i < s->nb_points; i++) {
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[i], ctx);
    }

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        ret = ff_inlink_consume_frame(inlink, &frame);
        break;
    case AVMEDIA_TYPE_AUDIO:
        /* room left in the current segment; close every segment that is
         * already full before consuming more input */
        diff = s->points[s->current_point] - inl->sample_count_out;
        while (diff <= 0) {
            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, s->last_pts);
            s->current_point++;
            if (s->current_point >= s->nb_points)
                return AVERROR(EINVAL);

            diff = s->points[s->current_point] - inl->sample_count_out;
        }
        if (s->use_timestamps) {
            /* NOTE(review): diff here is points[] (rescaled to the link
             * time base by config_input) minus a sample count — mixed
             * units; confirm the intended semantics against upstream. */
            max_samples = av_rescale_q(diff, av_make_q(1, inlink->sample_rate), inlink->time_base);
        } else {
            max_samples = FFMAX(1, FFMIN(diff, INT_MAX));
        }
        if (max_samples <= 0 || max_samples > INT_MAX)
            ret = ff_inlink_consume_frame(inlink, &frame);
        else
            ret = ff_inlink_consume_samples(inlink, 1, max_samples, &frame);
        break;
    default:
        return AVERROR_BUG;
    }

    if (ret > 0) {
        s->last_pts = frame->pts;
        /* close every segment the new frame has moved past */
        while (current_segment_finished(ctx, frame)) {
            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, frame->pts);
            s->current_point++;
        }

        if (s->current_point >= s->nb_points) {
            av_frame_free(&frame);
            return AVERROR(EINVAL);
        }

        ret = ff_filter_frame(ctx->outputs[s->current_point], frame);
    }

    if (ret < 0) {
        return ret;
    } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        /* input finished: propagate its status to all remaining outputs */
        for (int i = s->current_point; i < s->nb_points; i++)
            ff_outlink_set_status(ctx->outputs[i], status, pts);
        return 0;
    } else {
        /* no frame available: request input if any open output wants data */
        for (int i = s->current_point; i < s->nb_points; i++) {
            if (ff_outlink_frame_wanted(ctx->outputs[i]))
                ff_inlink_request_frame(inlink);
        }
        return 0;
    }
}
256 static av_cold void uninit(AVFilterContext *ctx)
258 SegmentContext *s = ctx->priv;
260 av_freep(&s->points);
#define OFFSET(x) offsetof(SegmentContext, x)

/* Option entry shared by the video (segment) and audio (asegment)
 * variants; FLAGS must be defined by the variant before use. */
#define COMMON_OPTS \
    { "timestamps", "timestamps of input at which to split input", OFFSET(timestamps_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS }, \

#if CONFIG_SEGMENT_FILTER

/* Thin wrapper binding the shared init() to the video media type. */
static av_cold int video_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_VIDEO);
}

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption segment_options[] = {
    COMMON_OPTS
    { "frames", "frames at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(segment);

static const AVFilterPad segment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
};

/* Video variant: outputs are created dynamically in init(), one per segment. */
const FFFilter ff_vf_segment = {
    .p.name        = "segment",
    .p.description = NULL_IF_CONFIG_SMALL("Segment video stream."),
    .p.priv_class  = &segment_class,
    .p.flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
    .init          = video_init,
    .uninit        = uninit,
    .priv_size     = sizeof(SegmentContext),
    .activate      = activate,
    FILTER_INPUTS(segment_inputs),
};
#endif // CONFIG_SEGMENT_FILTER
#if CONFIG_ASEGMENT_FILTER

/* Thin wrapper binding the shared init() to the audio media type. */
static av_cold int audio_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_AUDIO);
}

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption asegment_options[] = {
    COMMON_OPTS
    { "samples", "samples at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(asegment);

static const AVFilterPad asegment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

/* Audio variant: outputs are created dynamically in init(), one per segment. */
const FFFilter ff_af_asegment = {
    .p.name        = "asegment",
    .p.description = NULL_IF_CONFIG_SMALL("Segment audio stream."),
    .p.priv_class  = &asegment_class,
    .p.flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
    .init          = audio_init,
    .uninit        = uninit,
    .priv_size     = sizeof(SegmentContext),
    .activate      = activate,
    FILTER_INPUTS(asegment_inputs),
};
#endif // CONFIG_ASEGMENT_FILTER