 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config_components.h"

#include "libavutil/avstring.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"

#include "avfilter.h"
#include "audio.h"
#include "filters.h"
typedef struct SegmentContext {
    const AVClass *class;

    char *timestamps_str;   // '|'-separated split timestamps (option string)
    char *points_str;       // '|'-separated split frame/sample counts (option string)
    int use_timestamps;     // nonzero if splitting on timestamps rather than counts

    int current_point;      // index of the segment currently being output
    int nb_points;          // number of split points, including the final open-ended one
    int64_t last_pts;       // pts of the last frame passed through

    int64_t *points;        // split points; the last entry is an INT64_MAX sentinel
} SegmentContext;
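
/* Count the number of '|'-separated items in item_str, writing the result
 * to *nb_items. Used to size the points array and the output pad list. */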
static void count_points(char *item_str, int *nb_items)
{
    char *p;

    if (!item_str)
        return;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == '|')
            (*nb_items)++;
    }
}
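
/* Parse nb_points '|'-separated split points from item_str into points[].
 * Values are parsed as timestamps (via av_parse_time) when the timestamps
 * option is used, otherwise as plain int64 frame/sample counts. */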
static int parse_points(AVFilterContext *ctx, char *item_str, int nb_points, int64_t *points)
{
    SegmentContext *s = ctx->priv;
    char *arg, *p = item_str;
    char *saveptr = NULL;
    int ret = 0;

    for (int i = 0; i < nb_points; i++) {
        if (!(arg = av_strtok(p, "|", &saveptr)))
            return AVERROR(EINVAL);

        p = NULL;

        if (s->use_timestamps) {
            ret = av_parse_time(&points[i], arg, s->use_timestamps);
        } else {
            if (sscanf(arg, "%"SCNd64, &points[i]) != 1)
                ret = AVERROR(EINVAL);
        }

        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Invalid splits supplied: %s\n", arg);
            return ret;
        }
    }

    return 0;
}
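
/* Shared init for segment/asegment: validate the options, parse the split
 * points, append a sentinel INT64_MAX point for the final segment, and
 * create one output pad ("output%d") per segment. */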
static av_cold int init(AVFilterContext *ctx, enum AVMediaType type)
{
    SegmentContext *s = ctx->priv;
    char *split_str;
    int ret;

    if (s->timestamps_str && s->points_str) {
        av_log(ctx, AV_LOG_ERROR, "Both timestamps and counts supplied.\n");
        return AVERROR(EINVAL);
    } else if (s->timestamps_str) {
        s->use_timestamps = 1;
        split_str = s->timestamps_str;
    } else if (s->points_str) {
        split_str = s->points_str;
    } else {
        av_log(ctx, AV_LOG_ERROR, "Neither timestamps nor durations nor counts supplied.\n");
        return AVERROR(EINVAL);
    }

    count_points(split_str, &s->nb_points);
    s->nb_points++; // one extra point for the final, open-ended segment

    s->points = av_calloc(s->nb_points, sizeof(*s->points));
    if (!s->points)
        return AVERROR(ENOMEM);

    ret = parse_points(ctx, split_str, s->nb_points - 1, s->points);
    if (ret < 0)
        return ret;

    s->points[s->nb_points - 1] = INT64_MAX;

    for (int i = 0; i < s->nb_points; i++) {
        AVFilterPad pad = { 0 };

        pad.type = type;
        pad.name = av_asprintf("output%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_append_outpad_free_name(ctx, &pad)) < 0)
            return ret;
    }

    return 0;
}
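
/* When splitting on timestamps, rescale the parsed points from
 * AV_TIME_BASE_Q to the input link's time base once it is known. */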
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SegmentContext *s = ctx->priv;
    AVRational tb = inlink->time_base;

    if (s->use_timestamps) {
        for (int i = 0; i < s->nb_points - 1; i++)
            s->points[i] = av_rescale_q(s->points[i], AV_TIME_BASE_Q, tb);
    }

    return 0;
}
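
/* Return nonzero when the frame about to be output lies past the current
 * split point: by pts when splitting on timestamps, otherwise by the
 * number of frames (video) or samples (audio) already sent downstream. */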
static int current_segment_finished(AVFilterContext *ctx, AVFrame *frame)
{
    SegmentContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    FilterLink *inl = ff_filter_link(inlink);
    int ret = 0;

    if (s->use_timestamps) {
        ret = frame->pts >= s->points[s->current_point];
    } else {
        switch (inlink->type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = inl->frame_count_out - 1 >= s->points[s->current_point];
            break;
        case AVMEDIA_TYPE_AUDIO:
            ret = inl->sample_count_out - frame->nb_samples >= s->points[s->current_point];
            break;
        }
    }

    return ret;
}
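
/* Per-activation flow: forward status back from all pending outputs, pull a
 * frame (or at most enough samples to reach the next split point) from the
 * input, close the outputs of any segments that just finished with EOF, and
 * send the frame to the output of the segment it belongs to. */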
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    FilterLink *inl = ff_filter_link(inlink);
    SegmentContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t max_samples;
    int64_t diff;
    int64_t pts;

    for (int i = s->current_point; i < s->nb_points; i++) {
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[i], ctx);
    }

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        ret = ff_inlink_consume_frame(inlink, &frame);
        break;
    case AVMEDIA_TYPE_AUDIO:
        diff = s->points[s->current_point] - inl->sample_count_out;
        while (diff <= 0) {
            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, s->last_pts);
            s->current_point++;
            if (s->current_point >= s->nb_points)
                return AVERROR(EINVAL);

            diff = s->points[s->current_point] - inl->sample_count_out;
        }
        if (s->use_timestamps) {
            max_samples = av_rescale_q(diff, av_make_q(1, inlink->sample_rate), inlink->time_base);
        } else {
            max_samples = FFMAX(1, FFMIN(diff, INT_MAX));
        }
        if (max_samples <= 0 || max_samples > INT_MAX)
            ret = ff_inlink_consume_frame(inlink, &frame);
        else
            ret = ff_inlink_consume_samples(inlink, 1, max_samples, &frame);
        break;
    default:
        return AVERROR_BUG;
    }

    if (ret > 0) {
        s->last_pts = frame->pts;
        while (current_segment_finished(ctx, frame)) {
            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, frame->pts);
            s->current_point++;

            if (s->current_point >= s->nb_points) {
                av_frame_free(&frame);
                return AVERROR(EINVAL);
            }
        }

        ret = ff_filter_frame(ctx->outputs[s->current_point], frame);
    }

    if (ret < 0) {
        return ret;
    } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        for (int i = s->current_point; i < s->nb_points; i++)
            ff_outlink_set_status(ctx->outputs[i], status, pts);
        return 0;
    } else {
        for (int i = s->current_point; i < s->nb_points; i++) {
            if (ff_outlink_frame_wanted(ctx->outputs[i]))
                ff_inlink_request_frame(inlink);
        }
        return FFERROR_NOT_READY;
    }
}
static av_cold void uninit(AVFilterContext *ctx)
{
    SegmentContext *s = ctx->priv;

    av_freep(&s->points);
}
#define OFFSET(x) offsetof(SegmentContext, x)
#define COMMON_OPTS \
    { "timestamps", "timestamps of input at which to split input", OFFSET(timestamps_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
#if CONFIG_SEGMENT_FILTER

static av_cold int video_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_VIDEO);
}

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption segment_options[] = {
    COMMON_OPTS
    { "frames", "frames at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(segment);

static const AVFilterPad segment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
};

const FFFilter ff_vf_segment = {
    .p.name        = "segment",
    .p.description = NULL_IF_CONFIG_SMALL("Segment video stream."),
    .p.priv_class  = &segment_class,
    .p.flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
    .init          = video_init,
    .uninit        = uninit,
    .priv_size     = sizeof(SegmentContext),
    .activate      = activate,
    FILTER_INPUTS(segment_inputs),
};
#endif // CONFIG_SEGMENT_FILTER
#if CONFIG_ASEGMENT_FILTER

static av_cold int audio_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_AUDIO);
}

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption asegment_options[] = {
    COMMON_OPTS
    { "samples", "samples at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(asegment);

static const AVFilterPad asegment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

const FFFilter ff_af_asegment = {
    .p.name        = "asegment",
    .p.description = NULL_IF_CONFIG_SMALL("Segment audio stream."),
    .p.priv_class  = &asegment_class,
    .p.flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
    .init          = audio_init,
    .uninit        = uninit,
    .priv_size     = sizeof(SegmentContext),
    .activate      = activate,
    FILTER_INPUTS(asegment_inputs),
};
#endif // CONFIG_ASEGMENT_FILTER