/*
 * ffmpeg filter configuration
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdint.h>

#include "ffmpeg.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/timestamp.h"

// FIXME private header, used for mid_pred()
#include "libavcodec/mathops.h"
typedef struct FilterGraphPriv {
    FilterGraph fg;

    // name used for logging
    char log_name[32];

    int is_simple;
    // true when the filtergraph contains only meta filters
    // that do not modify the frame data
    int is_meta;
    // source filters are present in the graph
    int have_sources;
    int disable_conversions;

    unsigned nb_outputs_done;

    const char *graph_desc;

    char *nb_threads;

    // frame for temporarily holding output from the filtergraph
    AVFrame *frame;
    // frame for sending output to the encoder
    AVFrame *frame_enc;

    Scheduler *sch;
    unsigned sch_idx;
} FilterGraphPriv;
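
/* FilterGraphPriv and the other *Priv structs in this file extend their
 * public counterparts by embedding them as the first member, so a pointer to
 * the public struct can simply be cast to the private one, as the accessors
 * below do. */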
static FilterGraphPriv *fgp_from_fg(FilterGraph *fg)
{
    return (FilterGraphPriv*)fg;
}

static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
{
    return (const FilterGraphPriv*)fg;
}
// data that is local to the filter thread and not visible outside of it
typedef struct FilterGraphThread {
    AVFilterGraph *graph;

    AVFrame *frame;

    // Temporary buffer for output frames, since on filtergraph reset
    // we cannot send them to encoders immediately.
    // The output index is stored in frame opaque.
    AVFifo *frame_queue_out;

    // index of the next input to request from the scheduler
    unsigned next_in;
    // set to 1 after at least one frame passed through this output
    int got_frame;

    // EOF status of each input/output, as received by the thread
    uint8_t *eof_in;
    uint8_t *eof_out;
} FilterGraphThread;
typedef struct InputFilterPriv {
    InputFilter ifilter;

    InputFilterOptions opts;

    int index;

    AVFilterContext *filter;

    // used to hold submitted input
    AVFrame *frame;

    /* for filters that are not yet bound to an input stream,
     * this stores the input linklabel, if any */
    uint8_t *linklabel;

    // filter data type
    enum AVMediaType type;
    // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
    // same as type otherwise
    enum AVMediaType type_src;

    int eof;
    int bound;

    // parameters configured for this input
    int format;

    int width, height;
    AVRational sample_aspect_ratio;
    enum AVColorSpace color_space;
    enum AVColorRange color_range;

    int sample_rate;
    AVChannelLayout ch_layout;

    AVRational time_base;

    AVFifo *frame_queue;

    AVBufferRef *hw_frames_ctx;

    int displaymatrix_present;
    int displaymatrix_applied;
    int32_t displaymatrix[9];

    struct {
        AVFrame *frame;

        int64_t last_pts;
        int64_t end_pts;

        // marks if sub2video_update should force an initialization
        unsigned int initialize;
    } sub2video;
} InputFilterPriv;
static InputFilterPriv *ifp_from_ifilter(InputFilter *ifilter)
{
    return (InputFilterPriv*)ifilter;
}
typedef struct FPSConvContext {
    AVFrame *last_frame;
    /* number of frames emitted by the video-encoding sync code */
    int64_t frame_number;
    /* history of nb_frames_prev, i.e. the number of times the
     * previous frame was duplicated by vsync code in recent
     * do_video_out() calls */
    int64_t frames_prev_hist[3];

    uint64_t dup_warning;

    int last_dropped;
    int dropped_keyframe;

    enum VideoSyncMethod vsync_method;

    AVRational framerate;
    AVRational framerate_max;
    const AVRational *framerate_supported;
    int framerate_clip;
} FPSConvContext;
typedef struct OutputFilterPriv {
    OutputFilter ofilter;

    int index;

    void *log_parent;
    char log_name[32];

    char *name;

    AVFilterContext *filter;

    /* desired output stream properties */
    int format;
    int width, height;
    int sample_rate;
    AVChannelLayout ch_layout;
    enum AVColorSpace color_space;
    enum AVColorRange color_range;

    // time base in which the output is sent to our downstream;
    // does not need to match the filtersink's timebase
    AVRational tb_out;
    // at least one frame with the above timebase was sent
    // to our downstream, so it cannot change anymore
    int tb_out_locked;

    AVRational sample_aspect_ratio;

    AVDictionary *sws_opts;
    AVDictionary *swr_opts;

    // these are only set if no format is specified and the encoder gives us
    // multiple options; they point directly to the relevant lists of the encoder
    const int *formats;
    const AVChannelLayout *ch_layouts;
    const int *sample_rates;
    const enum AVColorSpace *color_spaces;
    const enum AVColorRange *color_ranges;

    AVRational enc_timebase;
    int64_t trim_start_us;
    int64_t trim_duration_us;
    // offset for output timestamps, in AV_TIME_BASE_Q
    int64_t ts_offset;
    int64_t next_pts;
    FPSConvContext fps;

    unsigned flags;
} OutputFilterPriv;
static OutputFilterPriv *ofp_from_ofilter(OutputFilter *ofilter)
{
    return (OutputFilterPriv*)ofilter;
}
typedef struct FilterCommand {
    char *target;
    char *command;
    char *arg;

    double time;
    int all_filters;
} FilterCommand;
static void filter_command_free(void *opaque, uint8_t *data)
{
    FilterCommand *fc = (FilterCommand*)data;

    av_freep(&fc->target);
    av_freep(&fc->command);
    av_freep(&fc->arg);

    av_free(data);
}
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
{
    AVFrame *frame = ifp->sub2video.frame;
    int ret;

    av_frame_unref(frame);

    frame->width       = ifp->width;
    frame->height      = ifp->height;
    frame->format      = ifp->format;
    frame->colorspace  = ifp->color_space;
    frame->color_range = ifp->color_range;

    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0)
        return ret;

    memset(frame->data[0], 0, frame->height * frame->linesize[0]);

    return 0;
}
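
/* Blit one PAL8 subtitle rectangle onto the RGB32 canvas allocated above:
 * each source byte indexes the rectangle's 32-bit palette (r->data[1]), so
 * the inner loop expands palette indices into full RGB32 pixels. */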
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                AVSubtitleRect *r)
{
    uint32_t *pal, *dst2;
    uint8_t *src, *src2;
    int x, y;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
        return;
    }
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
               r->x, r->y, r->w, r->h, w, h);
        return;
    }

    dst += r->y * dst_linesize + r->x * 4;
    src = r->data[0];
    pal = (uint32_t *)r->data[1];
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        src2 = src;
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        dst += dst_linesize;
        src += r->linesize[0];
    }
}
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
{
    AVFrame *frame = ifp->sub2video.frame;
    int ret;

    av_assert1(frame->data[0]);
    ifp->sub2video.last_pts = frame->pts = pts;
    ret = av_buffersrc_add_frame_flags(ifp->filter, frame,
                                       AV_BUFFERSRC_FLAG_KEEP_REF |
                                       AV_BUFFERSRC_FLAG_PUSH);
    if (ret != AVERROR_EOF && ret < 0)
        av_log(ifp->ifilter.graph, AV_LOG_WARNING,
               "Error while adding the frame to the buffer source: %s.\n",
               av_err2str(ret));
}
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
                             const AVSubtitle *sub)
{
    AVFrame *frame = ifp->sub2video.frame;
    uint8_t *dst;
    int dst_linesize;
    int num_rects;
    int64_t pts, end_pts;

    if (sub) {
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ifp->time_base);
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ifp->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, use the current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, use the previous subpicture's end time
           as the fallback value. */
        pts       = ifp->sub2video.initialize ?
                    heartbeat_pts : ifp->sub2video.end_pts;
        end_pts   = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ifp) < 0) {
        av_log(ifp->ifilter.graph, AV_LOG_ERROR,
               "Unable to get a blank canvas.\n");
        return;
    }
    dst          = frame->data[0];
    dst_linesize = frame->linesize[0];
    for (int i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ifp, pts);
    ifp->sub2video.end_pts    = end_pts;
    ifp->sub2video.initialize = 0;
}
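
/* The instantiations below generate choose_pix_fmts(), choose_sample_fmts(),
 * choose_sample_rates(), choose_color_spaces() and choose_color_ranges().
 * Each appends a "name=value:" clause, or "name=val1|val2:" when only a list
 * of supported values is known; e.g. an output restricted to two pixel
 * formats would contribute something like "pix_fmts=yuv420p|nv12:". The
 * accumulated string later becomes the arguments of a format/aformat filter. */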
/* Define a function for appending a list of allowed formats
 * to an AVBPrint. If nonempty, the list will have a header. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint)  \
{                                                                      \
    if (ofp->var == none && !ofp->supported_list)                      \
        return;                                                        \
    av_bprintf(bprint, #name "=");                                     \
    if (ofp->var != none) {                                            \
        av_bprintf(bprint, printf_format, get_name(ofp->var));         \
    } else {                                                           \
        const type *p;                                                 \
                                                                       \
        for (p = ofp->supported_list; *p != none; p++) {               \
            av_bprintf(bprint, printf_format "|", get_name(*p));       \
        }                                                              \
        if (bprint->len > 0)                                           \
            bprint->str[--bprint->len] = '\0';                         \
    }                                                                  \
    av_bprint_chars(bprint, ':', 1);                                   \
}

DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats,
                  AV_PIX_FMT_NONE, "%s", av_get_pix_fmt_name)

DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
                  AV_SAMPLE_FMT_NONE, "%s", av_get_sample_fmt_name)

DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
                  "%d", )

DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
                  AVCOL_SPC_UNSPECIFIED, "%s", av_color_space_name);

DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
                  AVCOL_RANGE_UNSPECIFIED, "%s", av_color_range_name);
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
{
    if (av_channel_layout_check(&ofp->ch_layout)) {
        av_bprintf(bprint, "channel_layouts=");
        av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
    } else if (ofp->ch_layouts) {
        const AVChannelLayout *p;

        av_bprintf(bprint, "channel_layouts=");
        for (p = ofp->ch_layouts; p->nb_channels; p++) {
            av_channel_layout_describe_bprint(p, bprint);
            av_bprintf(bprint, "|");
        }
        if (bprint->len > 0)
            bprint->str[--bprint->len] = '\0';
    } else
        return;
    av_bprint_chars(bprint, ':', 1);
}
static int read_binary(void *logctx, const char *path,
                       uint8_t **data, int *len)
{
    AVIOContext *io = NULL;
    int64_t fsize;
    int ret;

    *data = NULL;
    *len  = 0;

    ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
    if (ret < 0) {
        av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
               path, av_err2str(ret));
        return ret;
    }

    fsize = avio_size(io);
    if (fsize < 0 || fsize > INT_MAX) {
        av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
        ret = AVERROR(EIO);
        goto fail;
    }

    *data = av_malloc(fsize);
    if (!*data) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = avio_read(io, *data, fsize);
    if (ret != fsize) {
        av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
        ret = ret < 0 ? ret : AVERROR(EIO);
        goto fail;
    }

    *len = fsize;
    ret  = 0;

fail:
    avio_close(io);
    if (ret < 0) {
        av_freep(data);
        *len = 0;
    }
    return ret;
}
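
/* filter_opt_apply() below implements the '/'-prefixed option convention:
 * when a key such as "/text" is used, the value is treated as a path and the
 * actual option value is loaded from that file (read_binary() above for
 * binary options, file_read() for everything else). For instance, a
 * hypothetical "drawtext=/text=greeting.txt" would set the "text" option to
 * the contents of greeting.txt. */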
static int filter_opt_apply(void *logctx, AVFilterContext *f,
                            const char *key, const char *val)
{
    const AVOption *o = NULL;
    int ret;

    ret = av_opt_set(f, key, val, AV_OPT_SEARCH_CHILDREN);
    if (ret >= 0)
        return 0;

    if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
        o = av_opt_find(f, key + 1, NULL, 0, AV_OPT_SEARCH_CHILDREN);
    if (!o)
        goto err_apply;

    // key is a valid option name prefixed with '/'
    // interpret value as a path from which to load the actual option value
    key++;

    if (o->type == AV_OPT_TYPE_BINARY) {
        uint8_t *data;
        int len;

        ret = read_binary(logctx, val, &data, &len);
        if (ret < 0)
            goto err_load;

        ret = av_opt_set_bin(f, key, data, len, AV_OPT_SEARCH_CHILDREN);
        av_freep(&data);
    } else {
        char *data = file_read(val);
        if (!data) {
            ret = AVERROR(EIO);
            goto err_load;
        }

        ret = av_opt_set(f, key, data, AV_OPT_SEARCH_CHILDREN);
        av_freep(&data);
    }
    if (ret < 0)
        goto err_apply;

    return 0;

err_apply:
    av_log(logctx, AV_LOG_ERROR,
           "Error applying option '%s' to filter '%s': %s\n",
           key, f->filter->name, av_err2str(ret));
    return ret;
err_load:
    av_log(logctx, AV_LOG_ERROR,
           "Error loading value for option '%s' from file '%s'\n",
           key, val);
    return ret;
}
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
{
    for (size_t i = 0; i < seg->nb_chains; i++) {
        AVFilterChain *ch = seg->chains[i];

        for (size_t j = 0; j < ch->nb_filters; j++) {
            AVFilterParams *p = ch->filters[j];
            const AVDictionaryEntry *e = NULL;

            av_assert0(p->filter);

            while ((e = av_dict_iterate(p->opts, e))) {
                int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
                if (ret < 0)
                    return ret;
            }

            av_dict_free(&p->opts);
        }
    }

    return 0;
}
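
/* graph_parse() drives the AVFilterGraphSegment API in stages: parse the
 * textual description, create the filters, attach the hardware device context
 * to filters that can use it, apply per-filter options (including the
 * file-loading convention above), and finally apply the links, returning the
 * dangling inputs and outputs to the caller. */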
static int graph_parse(void *logctx,
                       AVFilterGraph *graph, const char *desc,
                       AVFilterInOut **inputs, AVFilterInOut **outputs,
                       AVBufferRef *hw_device)
{
    AVFilterGraphSegment *seg;
    int ret;

    *inputs  = NULL;
    *outputs = NULL;

    ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
    if (ret < 0)
        return ret;

    ret = avfilter_graph_segment_create_filters(seg, 0);
    if (ret < 0)
        goto fail;

    if (hw_device) {
        for (int i = 0; i < graph->nb_filters; i++) {
            AVFilterContext *f = graph->filters[i];

            if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
                continue;
            f->hw_device_ctx = av_buffer_ref(hw_device);
            if (!f->hw_device_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    ret = graph_opts_apply(logctx, seg);
    if (ret < 0)
        goto fail;

    ret = avfilter_graph_segment_apply(seg, 0, inputs, outputs);

fail:
    avfilter_graph_segment_free(&seg);
    return ret;
}
// Filters can be configured only if the formats of all inputs are known.
static int ifilter_has_all_input_formats(FilterGraph *fg)
{
    for (int i = 0; i < fg->nb_inputs; i++) {
        InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
        if (ifp->format < 0)
            return 0;
    }
    return 1;
}
static int filter_thread(void *arg);

static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
{
    AVFilterContext *ctx = inout->filter_ctx;
    AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
    int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;

    if (nb_pads > 1)
        return av_strdup(ctx->filter->name);
    return av_asprintf("%s:%s", ctx->filter->name,
                       avfilter_pad_get_name(pads, inout->pad_idx));
}
static const char *ofilter_item_name(void *obj)
{
    OutputFilterPriv *ofp = obj;
    return ofp->log_name;
}

static const AVClass ofilter_class = {
    .class_name                = "OutputFilter",
    .version                   = LIBAVUTIL_VERSION_INT,
    .item_name                 = ofilter_item_name,
    .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
    .category                  = AV_CLASS_CATEGORY_FILTER,
};
static OutputFilter *ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
{
    OutputFilterPriv *ofp;
    OutputFilter *ofilter;

    ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
    if (!ofp)
        return NULL;

    ofilter           = &ofp->ofilter;
    ofilter->class    = &ofilter_class;
    ofp->log_parent   = fg;
    ofilter->graph    = fg;
    ofilter->type     = type;
    ofp->format       = -1;
    ofp->color_space  = AVCOL_SPC_UNSPECIFIED;
    ofp->color_range  = AVCOL_RANGE_UNSPECIFIED;
    ofp->index        = fg->nb_outputs - 1;

    snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
             av_get_media_type_string(type)[0], ofp->index);

    return ofilter;
}
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
                            const ViewSpecifier *vs)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
    SchedulerNode src;
    int ret;

    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != ist->par->codec_type &&
        !(ifp->type == AVMEDIA_TYPE_VIDEO && ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
               av_get_media_type_string(ist->par->codec_type), av_get_media_type_string(ifp->type));
        return AVERROR(EINVAL);
    }

    ifp->type_src = ist->st->codecpar->codec_type;

    ifp->opts.fallback = av_frame_alloc();
    if (!ifp->opts.fallback)
        return AVERROR(ENOMEM);

    ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
                         vs, &ifp->opts, &src);
    if (ret < 0)
        return ret;

    ret = sch_connect(fgp->sch,
                      src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
        ifp->sub2video.frame = av_frame_alloc();
        if (!ifp->sub2video.frame)
            return AVERROR(ENOMEM);

        ifp->width  = ifp->opts.sub2video_width;
        ifp->height = ifp->opts.sub2video_height;

        /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
           palettes for all rectangles are identical or compatible */
        ifp->format = AV_PIX_FMT_RGB32;

        ifp->time_base = AV_TIME_BASE_Q;

        av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
               ifp->width, ifp->height);
    }

    return 0;
}
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec,
                            const ViewSpecifier *vs)
{
    FilterGraphPriv *fgp = fgp_from_fg(ifp->ifilter.graph);
    SchedulerNode src;
    int ret;

    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != dec->type) {
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
               av_get_media_type_string(dec->type), av_get_media_type_string(ifp->type));
        return AVERROR(EINVAL);
    }

    ifp->type_src = ifp->type;

    ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
    if (ret < 0)
        return ret;

    ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    return 0;
}
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
                              const AVChannelLayout *layout_requested)
{
    int i, err;

    if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
        /* Pass the layout through for all orders but UNSPEC */
        err = av_channel_layout_copy(&f->ch_layout, layout_requested);
        if (err < 0)
            return err;
        return 0;
    }

    /* Requested layout is of order UNSPEC */
    if (!layouts_allowed) {
        /* Use the default native layout for the requested number of channels
           when the encoder doesn't have a list of supported layouts */
        av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
        return 0;
    }
    /* Encoder has a list of supported layouts. Pick the first layout in it with the
       same number of channels as the requested layout */
    for (i = 0; layouts_allowed[i].nb_channels; i++) {
        if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
            break;
    }
    if (layouts_allowed[i].nb_channels) {
        /* Use it if one is found */
        err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
        if (err < 0)
            return err;
        return 0;
    }
    /* If no layout with the requested number of channels was found, use the
       default native layout for it. */
    av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);

    return 0;
}
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
                     const OutputFilterOptions *opts)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    FilterGraph *fg = ofilter->graph;
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    int ret;

    av_assert0(!ofilter->bound);
    av_assert0(!opts->enc ||
               ofilter->type == opts->enc->type);

    ofilter->bound = 1;
    av_freep(&ofilter->linklabel);

    ofp->flags        = opts->flags;
    ofp->ts_offset    = opts->ts_offset;
    ofp->enc_timebase = opts->output_tb;

    ofp->trim_start_us    = opts->trim_start_us;
    ofp->trim_duration_us = opts->trim_duration_us;

    ofp->name = av_strdup(opts->name);
    if (!ofp->name)
        return AVERROR(ENOMEM);

    ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
    if (ret < 0)
        return ret;

    ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
    if (ret < 0)
        return ret;

    if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
        av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);

    if (fgp->is_simple) {
        // for a simple filtergraph there is just one output,
        // so use only graph-level information for logging
        ofp->log_parent = NULL;
        av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
    } else
        av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);

    switch (ofilter->type) {
    case AVMEDIA_TYPE_VIDEO:
        ofp->width  = opts->width;
        ofp->height = opts->height;
        if (opts->format != AV_PIX_FMT_NONE) {
            ofp->format = opts->format;
        } else
            ofp->formats = opts->formats;

        if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
            ofp->color_space = opts->color_space;
        else
            ofp->color_spaces = opts->color_spaces;

        if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
            ofp->color_range = opts->color_range;
        else
            ofp->color_ranges = opts->color_ranges;

        fgp->disable_conversions |= !!(ofp->flags & OFILTER_FLAG_DISABLE_CONVERT);

        ofp->fps.last_frame = av_frame_alloc();
        if (!ofp->fps.last_frame)
            return AVERROR(ENOMEM);

        ofp->fps.vsync_method        = opts->vsync_method;
        ofp->fps.framerate           = opts->frame_rate;
        ofp->fps.framerate_max       = opts->max_frame_rate;
        ofp->fps.framerate_supported = opts->frame_rates;

        // reduce frame rate for mpeg4 to be within the spec limits
        if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
            ofp->fps.framerate_clip = 65535;

        ofp->fps.dup_warning = 1000;

        break;
    case AVMEDIA_TYPE_AUDIO:
        if (opts->format != AV_SAMPLE_FMT_NONE) {
            ofp->format = opts->format;
        } else {
            ofp->formats = opts->formats;
        }
        if (opts->sample_rate) {
            ofp->sample_rate = opts->sample_rate;
        } else
            ofp->sample_rates = opts->sample_rates;
        if (opts->ch_layout.nb_channels) {
            int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
            if (ret < 0)
                return ret;
        } else {
            ofp->ch_layouts = opts->ch_layouts;
        }
        break;
    }

    ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
                      SCH_ENC(sched_idx_enc));
    if (ret < 0)
        return ret;

    return 0;
}
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp,
                                const OutputFilterOptions *opts)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);

    av_assert0(!ofilter->bound);
    av_assert0(ofilter->type == ifp->type);

    ofilter->bound = 1;
    av_freep(&ofilter->linklabel);

    ofp->name = av_strdup(opts->name);
    if (!ofp->name)
        return AVERROR(ENOMEM);

    av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);

    return 0;
}
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
{
    FilterGraphPriv *fgp = fgp_from_fg(ifp->ifilter.graph);
    OutputFilter *ofilter_src = fg_src->outputs[out_idx];
    OutputFilterOptions opts;
    char name[32];
    int ret;

    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != ofilter_src->type) {
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
               av_get_media_type_string(ofilter_src->type),
               av_get_media_type_string(ifp->type));
        return AVERROR(EINVAL);
    }

    ifp->type_src = ifp->type;

    memset(&opts, 0, sizeof(opts));

    snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->index);
    opts.name = name;

    ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
    if (ret < 0)
        return ret;

    ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
                      SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    return 0;
}
static InputFilter *ifilter_alloc(FilterGraph *fg)
{
    InputFilterPriv *ifp;
    InputFilter *ifilter;

    ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
    if (!ifp)
        return NULL;

    ifilter        = &ifp->ifilter;
    ifilter->graph = fg;

    ifp->frame = av_frame_alloc();
    if (!ifp->frame)
        return NULL;

    ifp->index       = fg->nb_inputs - 1;
    ifp->format      = -1;
    ifp->color_space = AVCOL_SPC_UNSPECIFIED;
    ifp->color_range = AVCOL_RANGE_UNSPECIFIED;

    ifp->frame_queue = av_fifo_alloc2(8, sizeof(AVFrame*), AV_FIFO_FLAG_AUTO_GROW);
    if (!ifp->frame_queue)
        return NULL;

    return ifilter;
}
void fg_free(FilterGraph **pfg)
{
    FilterGraph *fg = *pfg;
    FilterGraphPriv *fgp;

    if (!fg)
        return;
    fgp = fgp_from_fg(fg);

    for (int j = 0; j < fg->nb_inputs; j++) {
        InputFilter *ifilter = fg->inputs[j];
        InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

        if (ifp->frame_queue) {
            AVFrame *frame;
            while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
                av_frame_free(&frame);
            av_fifo_freep2(&ifp->frame_queue);
        }
        av_frame_free(&ifp->sub2video.frame);

        av_frame_free(&ifp->frame);
        av_frame_free(&ifp->opts.fallback);

        av_buffer_unref(&ifp->hw_frames_ctx);
        av_freep(&ifp->linklabel);
        av_freep(&ifp->opts.name);
        av_freep(&ifilter->name);
        av_freep(&fg->inputs[j]);
    }
    av_freep(&fg->inputs);
    for (int j = 0; j < fg->nb_outputs; j++) {
        OutputFilter *ofilter = fg->outputs[j];
        OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);

        av_frame_free(&ofp->fps.last_frame);
        av_dict_free(&ofp->sws_opts);
        av_dict_free(&ofp->swr_opts);

        av_freep(&ofilter->linklabel);
        av_freep(&ofilter->name);
        av_freep(&ofilter->apad);
        av_freep(&ofp->name);
        av_channel_layout_uninit(&ofp->ch_layout);
        av_freep(&fg->outputs[j]);
    }
    av_freep(&fg->outputs);
    av_freep(&fgp->graph_desc);
    av_freep(&fgp->nb_threads);

    av_frame_free(&fgp->frame);
    av_frame_free(&fgp->frame_enc);

    av_freep(pfg);
}
static const char *fg_item_name(void *obj)
{
    const FilterGraphPriv *fgp = obj;

    return fgp->log_name;
}

static const AVClass fg_class = {
    .class_name = "FilterGraph",
    .version    = LIBAVUTIL_VERSION_INT,
    .item_name  = fg_item_name,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
{
    FilterGraphPriv *fgp;
    FilterGraph *fg;

    AVFilterInOut *inputs, *outputs;
    AVFilterGraph *graph;
    int ret = 0;

    fgp = av_mallocz(sizeof(*fgp));
    if (!fgp) {
        av_freep(&graph_desc);
        return AVERROR(ENOMEM);
    }
    fg = &fgp->fg;

    if (pfg) {
        *pfg = fg;
        fg->index = -1;
    } else {
        ret = av_dynarray_add_nofree(&filtergraphs, &nb_filtergraphs, fgp);
        if (ret < 0) {
            av_freep(&graph_desc);
            av_freep(&fgp);
            return ret;
        }

        fg->index = nb_filtergraphs - 1;
    }

    fg->class                = &fg_class;
    fgp->graph_desc          = graph_desc;
    fgp->disable_conversions = !auto_conversion_filters;
    fgp->sch                 = sch;

    snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);

    fgp->frame     = av_frame_alloc();
    fgp->frame_enc = av_frame_alloc();
    if (!fgp->frame || !fgp->frame_enc)
        return AVERROR(ENOMEM);

    /* this graph is only used for determining the kinds of inputs
     * and outputs we have, and is discarded on exit from this function */
    graph = avfilter_graph_alloc();
    if (!graph)
        return AVERROR(ENOMEM);
    graph->nb_threads = 1;

    ret = graph_parse(fg, graph, fgp->graph_desc, &inputs, &outputs,
                      hw_device_for_filter());
    if (ret < 0)
        goto fail;

    for (unsigned i = 0; i < graph->nb_filters; i++) {
        const AVFilter *f = graph->filters[i]->filter;
        if ((!avfilter_filter_pad_count(f, 0) &&
             !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
            !strcmp(f->name, "apad")) {
            fgp->have_sources = 1;
            break;
        }
    }

    for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
        InputFilter *const ifilter = ifilter_alloc(fg);
        InputFilterPriv *ifp;

        if (!ifilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ifp            = ifp_from_ifilter(ifilter);
        ifp->linklabel = cur->name;
        cur->name      = NULL;

        ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
                                          cur->pad_idx);

        if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
            av_log(fg, AV_LOG_FATAL, "Only video and audio filters are supported "
                   "currently.\n");
            ret = AVERROR(ENOSYS);
            goto fail;
        }

        ifilter->name = describe_filter_link(fg, cur, 1);
        if (!ifilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
        const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
                                                            cur->pad_idx);
        OutputFilter *const ofilter = ofilter_alloc(fg, type);

        if (!ofilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ofilter->linklabel = cur->name;
        cur->name          = NULL;

        ofilter->name = describe_filter_link(fg, cur, 0);
        if (!ofilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!fg->nb_outputs) {
        av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
        ret = AVERROR(ENOSYS);
        goto fail;
    }

    ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
                              filter_thread, fgp);
    if (ret < 0)
        goto fail;
    fgp->sch_idx = ret;

fail:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    avfilter_graph_free(&graph);

    if (ret < 0)
        return ret;

    return 0;
}
int fg_create_simple(FilterGraph **pfg,
                     InputStream *ist,
                     char *graph_desc,
                     Scheduler *sch, unsigned sched_idx_enc,
                     const OutputFilterOptions *opts)
{
    const enum AVMediaType type = ist->par->codec_type;
    FilterGraph *fg;
    FilterGraphPriv *fgp;
    int ret;

    ret = fg_create(pfg, graph_desc, sch);
    if (ret < 0)
        return ret;
    fg  = *pfg;
    fgp = fgp_from_fg(fg);

    fgp->is_simple = 1;

    snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
             av_get_media_type_string(type)[0], opts->name);

    if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
        av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
               "to have exactly 1 input and 1 output. "
               "However, it had %d input(s) and %d output(s). Please adjust, "
               "or use a complex filtergraph (-filter_complex) instead.\n",
               graph_desc, fg->nb_inputs, fg->nb_outputs);
        return AVERROR(EINVAL);
    }
    if (fg->outputs[0]->type != type) {
        av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
               "it to %s output stream\n",
               av_get_media_type_string(fg->outputs[0]->type),
               av_get_media_type_string(type));
        return AVERROR(EINVAL);
    }

    ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
    if (ret < 0)
        return ret;

    ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
    if (ret < 0)
        return ret;

    if (opts->nb_threads) {
        av_freep(&fgp->nb_threads);
        fgp->nb_threads = av_strdup(opts->nb_threads);
        if (!fgp->nb_threads)
            return AVERROR(ENOMEM);
    }

    return 0;
}
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
{
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    InputStream *ist = NULL;
    enum AVMediaType type = ifp->type;
    ViewSpecifier vs = { .type = VIEW_SPECIFIER_TYPE_NONE };
    const char *spec;
    char *p;
    int i, ret;

    if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
        // bind to a standalone decoder
        int dec_idx;

        dec_idx = strtol(ifp->linklabel + 4, &p, 0);
        if (dec_idx < 0 || dec_idx >= nb_decoders) {
            av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
                   dec_idx, fgp->graph_desc);
            return AVERROR(EINVAL);
        }

        if (type == AVMEDIA_TYPE_VIDEO) {
            spec = *p == ':' ? p + 1 : p;
            ret = view_specifier_parse(&spec, &vs);
            if (ret < 0)
                return ret;
        }

        ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
        if (ret < 0)
            av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
                   ifilter->name);
        return ret;
    } else if (ifp->linklabel) {
        StreamSpecifier ss;
        AVFormatContext *s;
        AVStream *st = NULL;
        int file_idx;

        // try finding an unbound filtergraph output with this label
        for (int i = 0; i < nb_filtergraphs; i++) {
            FilterGraph *fg_src = filtergraphs[i];

            if (fg == fg_src)
                continue;

            for (int j = 0; j < fg_src->nb_outputs; j++) {
                OutputFilter *ofilter = fg_src->outputs[j];

                if (!ofilter->bound && ofilter->linklabel &&
                    !strcmp(ofilter->linklabel, ifp->linklabel)) {
                    av_log(fg, AV_LOG_VERBOSE,
                           "Binding input with label '%s' to filtergraph output %d:%d\n",
                           ifp->linklabel, i, j);

                    ret = ifilter_bind_fg(ifp, fg_src, j);
                    if (ret < 0)
                        av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
                               ifp->linklabel);
                    return ret;
                }
            }
        }

        // bind to an explicitly specified demuxer stream
        file_idx = strtol(ifp->linklabel, &p, 0);
        if (file_idx < 0 || file_idx >= nb_input_files) {
            av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
                   file_idx, fgp->graph_desc);
            return AVERROR(EINVAL);
        }
        s = input_files[file_idx]->ctx;

        ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
        if (ret < 0) {
            av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
            return ret;
        }

        if (type == AVMEDIA_TYPE_VIDEO) {
            spec = ss.remainder ? ss.remainder : "";
            ret = view_specifier_parse(&spec, &vs);
            if (ret < 0) {
                stream_specifier_uninit(&ss);
                return ret;
            }
        }

        for (i = 0; i < s->nb_streams; i++) {
            enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
            if (stream_type != type &&
                !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
                  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
                continue;
            if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
                st = s->streams[i];
                break;
            }
        }
        stream_specifier_uninit(&ss);
        if (!st) {
            av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches no streams.\n", p, fgp->graph_desc);
            return AVERROR(EINVAL);
        }
        ist = input_files[file_idx]->streams[st->index];

        av_log(fg, AV_LOG_VERBOSE,
               "Binding input with label '%s' to input stream %d:%d\n",
               ifp->linklabel, ist->file->index, ist->index);
    } else {
        ist = ist_find_unused(type);
        if (!ist) {
            av_log(fg, AV_LOG_FATAL,
                   "Cannot find an unused %s input stream to feed the "
                   "unlabeled input pad %s.\n",
                   av_get_media_type_string(type), ifilter->name);
            return AVERROR(EINVAL);
        }

        av_log(fg, AV_LOG_VERBOSE,
               "Binding unlabeled input %d to input stream %d:%d\n",
               ifp->index, ist->file->index, ist->index);
    }
    av_assert0(ist);

    ret = ifilter_bind_ist(ifilter, ist, &vs);
    if (ret < 0) {
        av_log(fg, AV_LOG_ERROR,
               "Error binding an input stream to complex filtergraph input %s.\n",
               ifilter->name);
        return ret;
    }

    return 0;
}
static int bind_inputs(FilterGraph *fg)
{
    // bind filtergraph inputs to input streams or other filtergraphs
    for (int i = 0; i < fg->nb_inputs; i++) {
        InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
        int ret;

        if (ifp->bound)
            continue;

        ret = fg_complex_bind_input(fg, &ifp->ifilter);
        if (ret < 0)
            return ret;
    }

    return 0;
}
int fg_finalise_bindings(void)
{
    int ret;

    for (int i = 0; i < nb_filtergraphs; i++) {
        ret = bind_inputs(filtergraphs[i]);
        if (ret < 0)
            return ret;
    }

    // check that all outputs were bound
    for (int i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];

        for (int j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *output = fg->outputs[j];
            if (!output->bound) {
                av_log(fg, AV_LOG_FATAL,
                       "Filter '%s' has output %d (%s) unconnected\n",
                       output->name, j,
                       output->linklabel ? (const char *)output->linklabel : "unlabeled");
                return AVERROR(EINVAL);
            }
        }
    }

    return 0;
}
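
/* insert_trim() appends a trim (video) or atrim (audio) filter in front of
 * the sink so that output trimming is honored; the microsecond start and
 * duration values are passed through the filters' "starti"/"durationi"
 * duration options. */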
static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
                       AVFilterContext **last_filter, int *pad_idx,
                       const char *filter_name)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    AVFilterContext *ctx;
    const AVFilter *trim;
    enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
    const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
    int ret = 0;

    if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
        return 0;

    trim = avfilter_get_by_name(name);
    if (!trim) {
        av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
               "recording time.\n", name);
        return AVERROR_FILTER_NOT_FOUND;
    }

    ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
    if (!ctx)
        return AVERROR(ENOMEM);

    if (duration != INT64_MAX) {
        ret = av_opt_set_int(ctx, "durationi", duration,
                             AV_OPT_SEARCH_CHILDREN);
    }
    if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
        ret = av_opt_set_int(ctx, "starti", start_time,
                             AV_OPT_SEARCH_CHILDREN);
    }
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter\n", name);
        return ret;
    }

    ret = avfilter_init_str(ctx, NULL);
    if (ret < 0)
        return ret;

    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if (ret < 0)
        return ret;

    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}
static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
                         const char *filter_name, const char *args)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    const AVFilter *filter = avfilter_get_by_name(filter_name);
    AVFilterContext *ctx;
    int ret;

    if (!filter)
        return AVERROR_BUG;

    ret = avfilter_graph_create_filter(&ctx,
                                       filter,
                                       filter_name, args, NULL, graph);
    if (ret < 0)
        return ret;

    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if (ret < 0)
        return ret;

    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}
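
/* The configure_output_*_filter() functions below terminate an output chain
 * in a buffersink/abuffersink, preceded as needed by auto-inserted helpers:
 * scale (when a fixed output size is requested), format/aformat (built from
 * the choose_* helpers above), apad for audio, and a trim stage. */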
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph,
                                         OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    AVFilterContext *last_filter = out->filter_ctx;
    AVBPrint bprint;
    int pad_idx = out->pad_idx;
    int ret;
    char name[255];

    snprintf(name, sizeof(name), "out_%s", ofp->name);
    ret = avfilter_graph_create_filter(&ofp->filter,
                                       avfilter_get_by_name("buffersink"),
                                       name, NULL, NULL, graph);

    if (ret < 0)
        return ret;

    if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
        char args[255];
        AVFilterContext *filter;
        const AVDictionaryEntry *e = NULL;

        snprintf(args, sizeof(args), "%d:%d",
                 ofp->width, ofp->height);

        while ((e = av_dict_iterate(ofp->sws_opts, e))) {
            av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
        }

        snprintf(name, sizeof(name), "scaler_out_%s", ofp->name);
        if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
                                                name, args, NULL, graph)) < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
    }

    av_assert0(!(ofp->flags & OFILTER_FLAG_DISABLE_CONVERT) ||
               ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
    av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
    choose_pix_fmts(ofp, &bprint);
    choose_color_spaces(ofp, &bprint);
    choose_color_ranges(ofp, &bprint);
    if (!av_bprint_is_complete(&bprint))
        return AVERROR(ENOMEM);

    if (bprint.len) {
        AVFilterContext *filter;

        ret = avfilter_graph_create_filter(&filter,
                                           avfilter_get_by_name("format"),
                                           "format", bprint.str, NULL, graph);
        av_bprint_finalize(&bprint, NULL);
        if (ret < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx     = 0;
    }

    snprintf(name, sizeof(name), "trim_out_%s", ofp->name);
    ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
        return ret;

    return 0;
}
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph,
                                         OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    AVBPrint args;
    char name[255];
    int ret;

    snprintf(name, sizeof(name), "out_%s", ofp->name);
    ret = avfilter_graph_create_filter(&ofp->filter,
                                       avfilter_get_by_name("abuffersink"),
                                       name, NULL, NULL, graph);
    if (ret < 0)
        return ret;

#define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do {                 \
    AVFilterContext *filt_ctx;                                              \
                                                                            \
    av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi "         \
           "similarly to -af " filter_name "=%s.\n", arg);                  \
                                                                            \
    ret = avfilter_graph_create_filter(&filt_ctx,                           \
                                       avfilter_get_by_name(filter_name),   \
                                       filter_name, arg, NULL, graph);      \
    if (ret < 0)                                                            \
        goto fail;                                                          \
                                                                            \
    ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0);                 \
    if (ret < 0)                                                            \
        goto fail;                                                          \
                                                                            \
    last_filter = filt_ctx;                                                 \
    pad_idx = 0;                                                            \
} while (0)

    av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED);

    choose_sample_fmts(ofp, &args);
    choose_sample_rates(ofp, &args);
    choose_channel_layouts(ofp, &args);
    if (!av_bprint_is_complete(&args)) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    if (args.len) {
        AVFilterContext *format;

        snprintf(name, sizeof(name), "format_out_%s", ofp->name);
        ret = avfilter_graph_create_filter(&format,
                                           avfilter_get_by_name("aformat"),
                                           name, args.str, NULL, graph);
        if (ret < 0)
            goto fail;

        ret = avfilter_link(last_filter, pad_idx, format, 0);
        if (ret < 0)
            goto fail;

        last_filter = format;
        pad_idx     = 0;
    }

    if (ofilter->apad) {
        AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
        fgp->have_sources = 1;
    }

    snprintf(name, sizeof(name), "trim for output %s", ofp->name);
    ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        goto fail;

    if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
        goto fail;
fail:
    av_bprint_finalize(&args, NULL);

    return ret;
}
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph,
                                   OutputFilter *ofilter, AVFilterInOut *out)
{
    switch (ofilter->type) {
    case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
    case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
    default: av_assert0(0); return 0;
    }
}
static void sub2video_prepare(InputFilterPriv *ifp)
{
    ifp->sub2video.last_pts = INT64_MIN;
    ifp->sub2video.end_pts  = INT64_MIN;

    /* sub2video structure has been (re-)initialized.
       Mark it as such so that the system will be
       initialized with the first received heartbeat. */
    ifp->sub2video.initialize = 1;
}
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph,
                                        InputFilter *ifilter, AVFilterInOut *in)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

    AVFilterContext *last_filter;
    const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
    const AVPixFmtDescriptor *desc;
    char name[255];
    int ret, pad_idx = 0;
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    if (!par)
        return AVERROR(ENOMEM);

    if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
        sub2video_prepare(ifp);

    snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
             ifp->opts.name);

    ifp->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
    if (!ifp->filter) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    par->format              = ifp->format;
    par->time_base           = ifp->time_base;
    par->frame_rate          = ifp->opts.framerate;
    par->width               = ifp->width;
    par->height              = ifp->height;
    par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
                               ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
    par->color_space         = ifp->color_space;
    par->color_range         = ifp->color_range;
    par->hw_frames_ctx       = ifp->hw_frames_ctx;
    ret = av_buffersrc_parameters_set(ifp->filter, par);
    if (ret < 0)
        goto fail;
    av_freep(&par);

    ret = avfilter_init_dict(ifp->filter, NULL);
    if (ret < 0)
        goto fail;

    last_filter = ifp->filter;

    desc = av_pix_fmt_desc_get(ifp->format);
    av_assert0(desc);

    if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
        char crop_buf[64];
        snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
                 ifp->opts.crop_left, ifp->opts.crop_right,
                 ifp->opts.crop_top, ifp->opts.crop_bottom,
                 ifp->opts.crop_left, ifp->opts.crop_top);
        ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
        if (ret < 0)
            return ret;
    }

    // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
    ifp->displaymatrix_applied = 0;
    if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
        !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
        int32_t *displaymatrix = ifp->displaymatrix;
        double theta;

        theta = get_rotation(displaymatrix);

        if (fabs(theta - 90) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose",
                                displaymatrix[3] > 0 ? "cclock_flip" : "clock");
        } else if (fabs(theta - 180) < 1.0) {
            if (displaymatrix[0] < 0) {
                ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
                if (ret < 0)
                    return ret;
            }
            if (displaymatrix[4] < 0) {
                ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
            }
        } else if (fabs(theta - 270) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose",
                                displaymatrix[3] < 0 ? "clock_flip" : "cclock");
        } else if (fabs(theta) > 1.0) {
            char rotate_buf[64];
            snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
            ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
        } else if (fabs(theta) < 1.0) {
            if (displaymatrix && displaymatrix[4] < 0) {
                ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
            }
        }
        if (ret < 0)
            return ret;

        ifp->displaymatrix_applied = 1;
    }

    snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
    ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;
    return 0;
fail:
    av_freep(&par);

    return ret;
}
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph,
                                        InputFilter *ifilter, AVFilterInOut *in)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    AVFilterContext *last_filter;
    const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
    AVBPrint args;
    char name[255];
    int ret, pad_idx = 0;

    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
               ifp->time_base.num, ifp->time_base.den,
               ifp->sample_rate,
               av_get_sample_fmt_name(ifp->format));
    if (av_channel_layout_check(&ifp->ch_layout) &&
        ifp->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
        av_bprintf(&args, ":channel_layout=");
        av_channel_layout_describe_bprint(&ifp->ch_layout, &args);
    } else
        av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
    snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);

    if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
                                            name, args.str, NULL,
                                            graph)) < 0)
        return ret;
    last_filter = ifp->filter;

    snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
    ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;

    return 0;
}
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph,
                                  InputFilter *ifilter, AVFilterInOut *in)
{
    switch (ifp_from_ifilter(ifilter)->type) {
    case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
    case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
    default: av_assert0(0); return 0;
    }
}
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
{
    for (int i = 0; i < fg->nb_outputs; i++)
        ofp_from_ofilter(fg->outputs[i])->filter = NULL;
    for (int i = 0; i < fg->nb_inputs; i++)
        ifp_from_ifilter(fg->inputs[i])->filter = NULL;
    avfilter_graph_free(&fgt->graph);
}
static int filter_is_buffersrc(const AVFilterContext *f)
{
    return f->nb_inputs == 0 &&
           (!strcmp(f->filter->name, "buffer") ||
            !strcmp(f->filter->name, "abuffer"));
}

static int graph_is_meta(AVFilterGraph *graph)
{
    for (unsigned i = 0; i < graph->nb_filters; i++) {
        const AVFilterContext *f = graph->filters[i];

        /* in addition to filters flagged as meta, also
         * disregard sinks and buffersources (but not other sources,
         * since they introduce data we are not aware of) */
        if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
              f->nb_outputs == 0 ||
              filter_is_buffersrc(f)))
            return 0;
    }
    return 1;
}
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);

static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
{
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    AVBufferRef *hw_device;
    AVFilterInOut *inputs, *outputs, *cur;
    int ret, i, simple = filtergraph_is_simple(fg);
    int have_input_eof = 0;
    const char *graph_desc = fgp->graph_desc;

    cleanup_filtergraph(fg, fgt);
    fgt->graph = avfilter_graph_alloc();
    if (!fgt->graph)
        return AVERROR(ENOMEM);

    if (simple) {
        OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);

        if (filter_nbthreads) {
            ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
            if (ret < 0)
                goto fail;
        } else if (fgp->nb_threads) {
            ret = av_opt_set(fgt->graph, "threads", fgp->nb_threads, 0);
            if (ret < 0)
                return ret;
        }

        if (av_dict_count(ofp->sws_opts)) {
            ret = av_dict_get_string(ofp->sws_opts,
                                     &fgt->graph->scale_sws_opts,
                                     '=', ':');
            if (ret < 0)
                goto fail;
        }

        if (av_dict_count(ofp->swr_opts)) {
            char *args;
            ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
            if (ret < 0)
                goto fail;
            av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
            av_free(args);
        }
    } else {
        fgt->graph->nb_threads = filter_complex_nbthreads;
    }

    hw_device = hw_device_for_filter();

    ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
    if (ret < 0)
        goto fail;

    for (cur = inputs, i = 0; cur; cur = cur->next, i++)
        if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
            avfilter_inout_free(&inputs);
            avfilter_inout_free(&outputs);
            goto fail;
        }
    avfilter_inout_free(&inputs);

    for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
        ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
        if (ret < 0) {
            avfilter_inout_free(&outputs);
            goto fail;
        }
    }
    avfilter_inout_free(&outputs);

    if (fgp->disable_conversions)
        avfilter_graph_set_auto_convert(fgt->graph, AVFILTER_AUTO_CONVERT_NONE);
    if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
        goto fail;

    fgp->is_meta = graph_is_meta(fgt->graph);

    /* limit the lists of allowed formats to the ones selected, to
     * make sure they stay the same if the filtergraph is reconfigured later */
    for (int i = 0; i < fg->nb_outputs; i++) {
        OutputFilter *ofilter = fg->outputs[i];
        OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
        AVFilterContext *sink = ofp->filter;

        ofp->format = av_buffersink_get_format(sink);

        ofp->width  = av_buffersink_get_w(sink);
        ofp->height = av_buffersink_get_h(sink);
        ofp->color_space = av_buffersink_get_colorspace(sink);
        ofp->color_range = av_buffersink_get_color_range(sink);

        // If the timing parameters are not locked yet, get the tentative values
        // here but don't lock them. They will only be used if no output frames
        // are ever produced.
        if (!ofp->tb_out_locked) {
            AVRational fr = av_buffersink_get_frame_rate(sink);
            if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
                fr.num > 0 && fr.den > 0)
                ofp->fps.framerate = fr;
            ofp->tb_out = av_buffersink_get_time_base(sink);
        }
        ofp->sample_aspect_ratio = av_buffersink_get_sample_aspect_ratio(sink);

        ofp->sample_rate = av_buffersink_get_sample_rate(sink);
        av_channel_layout_uninit(&ofp->ch_layout);
        ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
        if (ret < 0)
            goto fail;
    }

    for (int i = 0; i < fg->nb_inputs; i++) {
        InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
        AVFrame *tmp;
        while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
            if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
                sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
            } else {
                ret = av_buffersrc_add_frame(ifp->filter, tmp);
            }
            av_frame_free(&tmp);
            if (ret < 0)
                goto fail;
        }
    }

    /* send the EOFs for the finished inputs */
    for (int i = 0; i < fg->nb_inputs; i++) {
        InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
        if (fgt->eof_in[i]) {
            ret = av_buffersrc_add_frame(ifp->filter, NULL);
            if (ret < 0)
                goto fail;
            have_input_eof = 1;
        }
    }

    if (have_input_eof) {
        // make sure the EOF propagates to the end of the graph
        ret = avfilter_graph_request_oldest(fgt->graph);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            goto fail;
    }

    return 0;
fail:
    cleanup_filtergraph(fg, fgt);
    return ret;
}
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    AVFrameSideData *sd;
    int ret;

    ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
    if (ret < 0)
        return ret;

    ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO)    ? (AVRational){ 1, frame->sample_rate } :
                     (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
                     frame->time_base;

    ifp->format              = frame->format;

    ifp->width               = frame->width;
    ifp->height              = frame->height;
    ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
    ifp->color_space         = frame->colorspace;
    ifp->color_range         = frame->color_range;

    ifp->sample_rate         = frame->sample_rate;
    ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
    if (ret < 0)
        return ret;

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
    if (sd)
        memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
    ifp->displaymatrix_present = !!sd;

    return 0;
}
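
/* Illustrative examples of the time_base selection above: a 48 kHz audio
 * input gets time_base 1/48000; a CFR video input declared at 30000/1001 fps
 * gets av_inv_q(framerate) = 1001/30000; anything else keeps the frame's own
 * time base. */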
int filtergraph_is_simple(const FilterGraph *fg)
{
    const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
    return fgp->is_simple;
}
static void send_command(FilterGraph *fg, AVFilterGraph *graph,
                         double time, const char *target,
                         const char *command, const char *arg, int all_filters)
{
    int ret;

    if (!graph)
        return;

    if (time < 0) {
        char response[4096];
        ret = avfilter_graph_send_command(graph, target, command, arg,
                                          response, sizeof(response),
                                          all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
                fg->index, ret, response);
    } else if (!all_filters) {
        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
    } else {
        ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
        if (ret < 0)
            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
    }
}
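
/* Illustrative path into send_command(): an interactive command such as
 * "drawtext reinit text=hello" arrives here; with time < 0 it is sent
 * immediately via avfilter_graph_send_command(), otherwise it is queued with
 * avfilter_graph_queue_command() to fire at the given timestamp. */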
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
{
    int nb_requests, nb_requests_max = -1;
    int best_input = -1;

    for (int i = 0; i < fg->nb_inputs; i++) {
        InputFilter *ifilter = fg->inputs[i];
        InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

        if (fgt->eof_in[i])
            continue;

        nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            best_input = i;
        }
    }

    av_assert0(best_input >= 0);

    return best_input;
}
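
/* av_buffersrc_get_nb_failed_requests() counts how often the graph asked this
 * buffersrc for a frame and none was available, so the input with the highest
 * count is the one the graph is most starved for; e.g. with counts {3, 7},
 * input 1 is requested next. */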
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
{
    OutputFilter *ofilter = &ofp->ofilter;
    FPSConvContext *fps = &ofp->fps;
    AVRational tb = (AVRational){ 0, 0 };
    AVRational fr;
    const FrameData *fd;

    fd = frame_data_c(frame);

    // apply -enc_time_base
    if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
        (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
        av_log(ofp, AV_LOG_ERROR,
               "Demuxing timebase not available - cannot use it for encoding\n");
        return AVERROR(EINVAL);
    }

    switch (ofp->enc_timebase.num) {
    case 0:                    break;
    case ENC_TIME_BASE_DEMUX:  tb = fd->dec.tb;        break;
    case ENC_TIME_BASE_FILTER: tb = frame->time_base;  break;
    default:                   tb = ofp->enc_timebase; break;
    }

    if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
        tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
        goto finish;
    }

    fr = fps->framerate;
    if (!fr.num) {
        AVRational fr_sink = av_buffersink_get_frame_rate(ofp->filter);
        if (fr_sink.num > 0 && fr_sink.den > 0)
            fr = fr_sink;
    }

    if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
        if (!fr.num && !fps->framerate_max.num) {
            fr = (AVRational){25, 1};
            av_log(ofp, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps. Use the -r option "
                   "if you want a different framerate.\n");
        }

        if (fps->framerate_max.num &&
            (av_q2d(fr) > av_q2d(fps->framerate_max) ||
             !fr.den))
            fr = fps->framerate_max;
    }

    if (fr.num > 0) {
        if (fps->framerate_supported) {
            int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
            fr = fps->framerate_supported[idx];
        }
        if (fps->framerate_clip) {
            av_reduce(&fr.num, &fr.den,
                      fr.num, fr.den, fps->framerate_clip);
        }
    }

    if (!(tb.num > 0 && tb.den > 0))
        tb = av_inv_q(fr);
    if (!(tb.num > 0 && tb.den > 0))
        tb = frame->time_base;

    fps->framerate     = fr;
finish:
    ofp->tb_out        = tb;
    ofp->tb_out_locked = 1;

    return 0;
}
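
/* Worked example (illustrative): with no -enc_time_base override and a video
 * output running at fr = 30000/1001 fps, the code above chooses
 * tb = av_inv_q(fr) = 1001/30000, i.e. exactly one timestamp tick per output
 * frame. */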
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
                                             AVRational tb_dst, int64_t start_time)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision

    AVRational tb = tb_dst;
    AVRational filter_tb = frame->time_base;
    const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

    if (frame->pts == AV_NOPTS_VALUE)
        goto early_exit;

    tb.den <<= extra_bits;
    float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
                av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
    float_pts /= 1 << extra_bits;
    // when float_pts is not exactly an integer,
    // avoid exact midpoints to reduce the chance of rounding differences, this
    // can be removed in case the fps code is changed to work with integers
    if (float_pts != llrint(float_pts))
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

    frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
                 av_rescale_q(start_time, AV_TIME_BASE_Q, tb_dst);
    frame->time_base = tb_dst;

early_exit:

    if (debug_ts) {
        av_log(logctx, AV_LOG_INFO,
               "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               av_ts2timestr(frame->pts, &tb_dst),
               float_pts, tb_dst.num, tb_dst.den);
    }

    return float_pts;
}
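
/* Worked example (illustrative): for tb_dst = 1/25, av_log2(25) = 4, so
 * extra_bits = av_clip(29 - 4, 0, 16) = 16; the rescale then runs in a
 * 1/(25 << 16) = 1/1638400 timebase and the division by 1 << 16 leaves
 * float_pts with 16 fractional bits of precision for the fps logic. */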
/* Convert frame timestamps to the encoder timebase and decide how many times
 * this (and possibly the previous) frame should be repeated in order to
 * conform to the desired target framerate (if any).
 */
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame,
                               int64_t *nb_frames, int64_t *nb_frames_prev)
{
    OutputFilter *ofilter = &ofp->ofilter;
    FPSConvContext *fps = &ofp->fps;
    double delta0, delta, sync_ipts, duration;

    if (!frame) {
        *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
                                                fps->frames_prev_hist[1],
                                                fps->frames_prev_hist[2]);

        if (!*nb_frames && fps->last_dropped) {
            atomic_fetch_add(&ofilter->nb_frames_drop, 1);
            fps->last_dropped++;
        }

        goto finish;
    }

    duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);

    sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
                                               ofp->tb_out, ofp->ts_offset);
    /* delta0 is the "drift" between the input frame and
     * where it would fall in the output. */
    delta0 = sync_ipts - ofp->next_pts;
    delta  = delta0 + duration;

    // tracks the number of times the PREVIOUS frame should be duplicated,
    // mostly for variable framerate (VFR)
    *nb_frames_prev = 0;
    /* by default, we output a single frame */
    *nb_frames = 1;

    if (delta0 < 0 &&
        delta > 0 &&
        fps->vsync_method != VSYNC_PASSTHROUGH
#if FFMPEG_OPT_VSYNC_DROP
        && fps->vsync_method != VSYNC_DROP
#endif
       ) {
        if (delta0 < -0.6) {
            av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
        } else
            av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
        sync_ipts = ofp->next_pts;
        duration += delta0;
        delta0 = 0;
    }

    switch (fps->vsync_method) {
    case VSYNC_VSCFR:
        if (fps->frame_number == 0 && delta0 >= 0.5) {
            av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
            delta = duration;
            delta0 = 0;
            ofp->next_pts = llrint(sync_ipts);
        }
    case VSYNC_CFR:
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
            *nb_frames = 0;
        } else if (delta < -1.1)
            *nb_frames = 0;
        else if (delta > 1.1) {
            *nb_frames = llrintf(delta);
            if (delta0 > 1.1)
                *nb_frames_prev = llrintf(delta0 - 0.6);
        }
        frame->duration = 1;
        break;
    case VSYNC_VFR:
        if (delta <= -0.6)
            *nb_frames = 0;
        else if (delta > 0.6)
            ofp->next_pts = llrint(sync_ipts);
        frame->duration = llrint(duration);
        break;
#if FFMPEG_OPT_VSYNC_DROP
    case VSYNC_DROP:
#endif
    case VSYNC_PASSTHROUGH:
        ofp->next_pts = llrint(sync_ipts);
        frame->duration = llrint(duration);
        break;
    default:
        av_assert0(0);
    }

finish:
    memmove(fps->frames_prev_hist + 1,
            fps->frames_prev_hist,
            sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
    fps->frames_prev_hist[0] = *nb_frames_prev;

    if (*nb_frames_prev == 0 && fps->last_dropped) {
        atomic_fetch_add(&ofilter->nb_frames_drop, 1);
        av_log(ofp, AV_LOG_VERBOSE,
               "*** dropping frame %"PRId64" at ts %"PRId64"\n",
               fps->frame_number, fps->last_frame->pts);
    }
    if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
        uint64_t nb_frames_dup;
        if (*nb_frames > dts_error_threshold * 30) {
            av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
            atomic_fetch_add(&ofilter->nb_frames_drop, 1);
            *nb_frames = 0;
            return;
        }
        nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
                                         *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
        av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
        if (nb_frames_dup > fps->dup_warning) {
            av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
            fps->dup_warning *= 10;
        }
    }

    fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
    fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
}
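
/* Worked CFR example (illustrative): with next expected output pts 100, a
 * frame arriving at sync_ipts = 101.3 with duration 1 gives delta0 = 1.3 and
 * delta = 2.3, so *nb_frames = llrintf(2.3) = 2 and, since delta0 > 1.1,
 * *nb_frames_prev = llrintf(1.3 - 0.6) = 1: the previous frame is repeated
 * once to fill the gap. Note that VSYNC_VSCFR intentionally falls through
 * into the VSYNC_CFR case above. */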
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
{
    FilterGraphPriv *fgp = fgp_from_fg(ofp->ofilter.graph);
    int ret;

    // we are finished and no frames were ever seen at this output,
    // at least initialize the encoder with a dummy frame
    if (!fgt->got_frame) {
        AVFrame *frame = fgt->frame;
        FrameData *fd;

        frame->time_base = ofp->tb_out;
        frame->format    = ofp->format;

        frame->width               = ofp->width;
        frame->height              = ofp->height;
        frame->sample_aspect_ratio = ofp->sample_aspect_ratio;

        frame->sample_rate = ofp->sample_rate;
        if (ofp->ch_layout.nb_channels) {
            ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
            if (ret < 0)
                return ret;
        }

        fd = frame_data(frame);
        if (!fd)
            return AVERROR(ENOMEM);

        fd->frame_rate_filter = ofp->fps.framerate;

        av_assert0(!frame->buf[0]);

        av_log(ofp, AV_LOG_WARNING,
               "No filtered frames for output stream, trying to "
               "initialize anyway.\n");

        ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
        if (ret < 0) {
            av_frame_unref(frame);
            return ret;
        }
    }

    fgt->eof_out[ofp->index] = 1;

    ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
    return (ret == AVERROR_EOF) ? 0 : ret;
}
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt,
                           AVFrame *frame)
{
    FilterGraphPriv *fgp = fgp_from_fg(ofp->ofilter.graph);
    AVFrame *frame_prev = ofp->fps.last_frame;
    enum AVMediaType type = ofp->ofilter.type;

    int64_t nb_frames = !!frame, nb_frames_prev = 0;

    if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
        video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);

    for (int64_t i = 0; i < nb_frames; i++) {
        AVFrame *frame_out;
        int ret;

        if (type == AVMEDIA_TYPE_VIDEO) {
            AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
                                frame_prev : frame;
            if (!frame_in)
                break;

            frame_out = fgp->frame_enc;
            ret = av_frame_ref(frame_out, frame_in);
            if (ret < 0)
                return ret;

            frame_out->pts = ofp->next_pts;

            if (ofp->fps.dropped_keyframe) {
                frame_out->flags |= AV_FRAME_FLAG_KEY;
                ofp->fps.dropped_keyframe = 0;
            }
        } else {
            frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
                av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
                av_rescale_q(ofp->ts_offset, AV_TIME_BASE_Q, ofp->tb_out);

            frame->time_base = ofp->tb_out;
            frame->duration  = av_rescale_q(frame->nb_samples,
                                            (AVRational){ 1, frame->sample_rate },
                                            ofp->tb_out);

            ofp->next_pts = frame->pts + frame->duration;

            frame_out = frame;
        }

        // send the frame to consumers
        ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
        if (ret < 0) {
            av_frame_unref(frame_out);

            if (!fgt->eof_out[ofp->index]) {
                fgt->eof_out[ofp->index] = 1;
                fgp->nb_outputs_done++;
            }

            return ret == AVERROR_EOF ? 0 : ret;
        }

        if (type == AVMEDIA_TYPE_VIDEO) {
            ofp->fps.frame_number++;
            ofp->next_pts++;

            if (i == nb_frames_prev && frame)
                frame->flags &= ~AV_FRAME_FLAG_KEY;
        }

        fgt->got_frame = 1;
    }

    if (frame && frame_prev) {
        av_frame_unref(frame_prev);
        av_frame_move_ref(frame_prev, frame);
    }

    if (!frame)
        return close_output(ofp, fgt);

    return 0;
}
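
/* Note on fg_output_frame() above: video frames get their pts from the
 * synthesized ofp->next_pts counter (one tick per output frame), while audio
 * frames get a rescaled pts and a duration derived from nb_samples; both
 * paths leave ofp->next_pts at the next expected timestamp. */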
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt,
                          AVFrame *frame)
{
    FilterGraphPriv *fgp = fgp_from_fg(ofp->ofilter.graph);
    AVFilterContext *filter = ofp->filter;
    FrameData *fd;
    int ret;

    ret = av_buffersink_get_frame_flags(filter, frame,
                                        AV_BUFFERSINK_FLAG_NO_REQUEST);
    if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
        ret = fg_output_frame(ofp, fgt, NULL);
        return (ret < 0) ? ret : 1;
    } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
        return 1;
    } else if (ret < 0) {
        av_log(ofp, AV_LOG_WARNING,
               "Error in retrieving a frame from the filtergraph: %s\n",
               av_err2str(ret));
        return ret;
    }

    if (fgt->eof_out[ofp->index]) {
        av_frame_unref(frame);
        return 0;
    }

    frame->time_base = av_buffersink_get_time_base(filter);

    if (debug_ts)
        av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
               frame->time_base.num, frame->time_base.den);

    // Choose the output timebase the first time we get a frame.
    if (!ofp->tb_out_locked) {
        ret = choose_out_timebase(ofp, frame);
        if (ret < 0) {
            av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
            av_frame_unref(frame);
            return ret;
        }
    }

    fd = frame_data(frame);
    if (!fd) {
        av_frame_unref(frame);
        return AVERROR(ENOMEM);
    }

    fd->wallclock[LATENCY_PROBE_FILTER_POST] = av_gettime_relative();

    // only use bits_per_raw_sample passed through from the decoder
    // if the filtergraph did not touch the frame data
    if (!fgp->is_meta)
        fd->bits_per_raw_sample = 0;

    if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
        if (!frame->duration) {
            AVRational fr = av_buffersink_get_frame_rate(filter);
            if (fr.num > 0 && fr.den > 0)
                frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
        }

        fd->frame_rate_filter = ofp->fps.framerate;
    }

    ret = fg_output_frame(ofp, fgt, frame);
    av_frame_unref(frame);
    if (ret < 0)
        return ret;

    return 0;
}
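
/* Return convention of fg_output_step(): 1 means the sink has nothing more to
 * offer right now (EAGAIN, or EOF already handled), 0 means one frame was
 * retrieved and forwarded, and negative values are errors. */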
/* retrieve all frames available at filtergraph outputs
 * and send them to consumers */
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt,
                       AVFrame *frame)
{
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    int did_step = 0;

    // graph not configured, just select the input to request
    if (!fgt->graph) {
        for (int i = 0; i < fg->nb_inputs; i++) {
            InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
            if (ifp->format < 0 && !fgt->eof_in[i]) {
                fgt->next_in = i;
                return 0;
            }
        }

        // This state - graph is not configured, but all inputs are either
        // initialized or EOF - should be unreachable because sending EOF to a
        // filter without even a fallback format should fail
        av_assert0(0);
        return AVERROR_BUG;
    }

    while (fgp->nb_outputs_done < fg->nb_outputs) {
        int ret;

        ret = avfilter_graph_request_oldest(fgt->graph);
        if (ret == AVERROR(EAGAIN)) {
            fgt->next_in = choose_input(fg, fgt);
            break;
        } else if (ret < 0) {
            if (ret == AVERROR_EOF)
                av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
            else
                av_log(fg, AV_LOG_ERROR,
                       "Error requesting a frame from the filtergraph: %s\n",
                       av_err2str(ret));
            return ret;
        }
        fgt->next_in = fg->nb_inputs;

        // return after one iteration, so that the scheduler can rate-control us
        if (did_step && fgp->have_sources)
            return 0;

        /* Reap all buffers present in the buffer sinks */
        for (int i = 0; i < fg->nb_outputs; i++) {
            OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[i]);

            ret = 0;
            while (!ret) {
                ret = fg_output_step(ofp, fgt, frame);
                if (ret < 0)
                    return ret;
            }
        }
        did_step = 1;
    }

    return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
}
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int64_t pts2;

    /* subtitles seem to be usually muxed ahead of other streams;
       if not, subtracting a larger time here is necessary */
    pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;

    /* do not send the heartbeat frame if the subtitle is already ahead */
    if (pts2 <= ifp->sub2video.last_pts)
        return;

    if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
        /* if we have hit the end of the current displayed subpicture,
           or if we need to initialize the system, update the
           overlaid subpicture and its start/end times */
        sub2video_update(ifp, pts2 + 1, NULL);
    else
        sub2video_push_ref(ifp, pts2);
}
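
/* sub2video_heartbeat() is driven by packets from other streams: it either
 * re-pushes the currently displayed subpicture so the overlay keeps pace with
 * the video, or refreshes it via sub2video_update() once the displayed
 * subpicture has expired. */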
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int ret;

    if (buffer) {
        AVFrame *tmp;

        if (!frame)
            return 0;

        tmp = av_frame_alloc();
        if (!tmp)
            return AVERROR(ENOMEM);

        av_frame_move_ref(tmp, frame);

        ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
        if (ret < 0) {
            av_frame_free(&tmp);
            return ret;
        }

        return 0;
    }

    // heartbeat frame
    if (frame && !frame->buf[0]) {
        sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
        return 0;
    }

    if (!frame) {
        if (ifp->sub2video.end_pts < INT64_MAX)
            sub2video_update(ifp, INT64_MAX, NULL);

        return av_buffersrc_add_frame(ifp->filter, NULL);
    }

    ifp->width  = frame->width  ? frame->width  : ifp->width;
    ifp->height = frame->height ? frame->height : ifp->height;

    sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);

    return 0;
}
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
                    int64_t pts, AVRational tb)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int ret;

    if (fgt->eof_in[ifp->index])
        return 0;

    fgt->eof_in[ifp->index] = 1;

    if (ifp->filter) {
        pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
                               AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

        ret = av_buffersrc_close(ifp->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
        if (ret < 0)
            return ret;
    } else {
        if (ifp->format < 0) {
            // the filtergraph was never configured, use the fallback parameters
            ifp->format              = ifp->opts.fallback->format;
            ifp->sample_rate         = ifp->opts.fallback->sample_rate;
            ifp->width               = ifp->opts.fallback->width;
            ifp->height              = ifp->opts.fallback->height;
            ifp->sample_aspect_ratio = ifp->opts.fallback->sample_aspect_ratio;
            ifp->color_space         = ifp->opts.fallback->colorspace;
            ifp->color_range         = ifp->opts.fallback->color_range;
            ifp->time_base           = ifp->opts.fallback->time_base;

            ret = av_channel_layout_copy(&ifp->ch_layout,
                                         &ifp->opts.fallback->ch_layout);
            if (ret < 0)
                return ret;

            if (ifilter_has_all_input_formats(ifilter->graph)) {
                ret = configure_filtergraph(ifilter->graph, fgt);
                if (ret < 0) {
                    av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
                    return ret;
                }
            }
        }

        if (ifp->format < 0) {
            av_log(ifilter->graph, AV_LOG_ERROR,
                   "Cannot determine format of input %s after EOF\n",
                   ifp->opts.name);
            return AVERROR_INVALIDDATA;
        }
    }

    return 0;
}
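
/* Two EOF paths above: with a configured buffersrc, the rescaled pts is
 * handed to av_buffersrc_close() so the graph knows the stream's end time;
 * without one, the fallback parameters are adopted so the graph can still be
 * configured, with EOF forwarded as part of that configuration. */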
enum ReinitReason {
    VIDEO_CHANGED   = (1 << 0),
    AUDIO_CHANGED   = (1 << 1),
    MATRIX_CHANGED  = (1 << 2),
    HWACCEL_CHANGED = (1 << 3)
};

static const char *unknown_if_null(const char *str)
{
    return str ? str : "unknown";
}
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt,
                      InputFilter *ifilter, AVFrame *frame)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    FrameData *fd;
    AVFrameSideData *sd;
    int need_reinit = 0, ret;

    /* determine if the parameters for this input changed */
    switch (ifp->type) {
    case AVMEDIA_TYPE_AUDIO:
        if (ifp->format      != frame->format ||
            ifp->sample_rate != frame->sample_rate ||
            av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
            need_reinit |= AUDIO_CHANGED;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ifp->format      != frame->format ||
            ifp->width       != frame->width ||
            ifp->height      != frame->height ||
            ifp->color_space != frame->colorspace ||
            ifp->color_range != frame->color_range)
            need_reinit |= VIDEO_CHANGED;
        break;
    }

    if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX))) {
        if (!ifp->displaymatrix_present ||
            memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
            need_reinit |= MATRIX_CHANGED;
    } else if (ifp->displaymatrix_present)
        need_reinit |= MATRIX_CHANGED;

    if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
        need_reinit = 0;

    if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit |= HWACCEL_CHANGED;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fgt->graph) {
        AVFrame *tmp = av_frame_alloc();

        if (!tmp)
            return AVERROR(ENOMEM);

        if (!ifilter_has_all_input_formats(fg)) {
            av_frame_move_ref(tmp, frame);

            ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
            if (ret < 0)
                av_frame_free(&tmp);

            return ret;
        }

        ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
        av_frame_free(&tmp);
        if (ret < 0)
            return ret;

        if (fgt->graph) {
            AVBPrint reason;
            av_bprint_init(&reason, 0, AV_BPRINT_SIZE_AUTOMATIC);
            if (need_reinit & AUDIO_CHANGED) {
                const char *sample_format_name = av_get_sample_fmt_name(frame->format);
                av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
                av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
                av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
            }
            if (need_reinit & VIDEO_CHANGED) {
                const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
                const char *color_space_name = av_color_space_name(frame->colorspace);
                const char *color_range_name = av_color_range_name(frame->color_range);
                av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
                           unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
                           unknown_if_null(color_space_name), frame->width, frame->height);
            }
            if (need_reinit & MATRIX_CHANGED)
                av_bprintf(&reason, "display matrix changed, ");
            if (need_reinit & HWACCEL_CHANGED)
                av_bprintf(&reason, "hwaccel changed, ");
            if (reason.len > 1)
                reason.str[reason.len - 2] = '\0'; // remove last comma
            av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
        }

        ret = configure_filtergraph(fg, fgt);
        if (ret < 0) {
            av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    frame->pts       = av_rescale_q(frame->pts,      frame->time_base, ifp->time_base);
    frame->duration  = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
    frame->time_base = ifp->time_base;

    if (ifp->displaymatrix_applied)
        av_frame_remove_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);

    fd = frame_data(frame);
    if (!fd)
        return AVERROR(ENOMEM);
    fd->wallclock[LATENCY_PROBE_FILTER_PRE] = av_gettime_relative();

    ret = av_buffersrc_add_frame_flags(ifp->filter, frame,
                                       AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0) {
        av_frame_unref(frame);
        if (ret != AVERROR_EOF)
            av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
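
/* Illustrative reinit scenario for send_frame() above: a video stream that
 * switches from 1280x720 to 1920x1080 mid-stream (with IFILTER_FLAG_REINIT
 * set) flags VIDEO_CHANGED, already-filtered frames are drained via
 * read_frames(), and the graph is rebuilt with the new parameters before the
 * triggering frame is submitted. */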
static void fg_thread_set_name(const FilterGraph *fg)
{
    char name[16];
    if (filtergraph_is_simple(fg)) {
        OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
        snprintf(name, sizeof(name), "%cf%s",
                 av_get_media_type_string(ofp->ofilter.type)[0],
                 ofp->name);
    } else {
        snprintf(name, sizeof(name), "fc%d", fg->index);
    }

    ff_thread_setname(name);
}
static void fg_thread_uninit(FilterGraphThread *fgt)
{
    if (fgt->frame_queue_out) {
        AVFrame *frame;
        while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
            av_frame_free(&frame);
        av_fifo_freep2(&fgt->frame_queue_out);
    }

    av_frame_free(&fgt->frame);
    av_freep(&fgt->eof_in);
    av_freep(&fgt->eof_out);

    avfilter_graph_free(&fgt->graph);

    memset(fgt, 0, sizeof(*fgt));
}
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
{
    memset(fgt, 0, sizeof(*fgt));

    fgt->frame = av_frame_alloc();
    if (!fgt->frame)
        goto fail;

    fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
    if (!fgt->eof_in)
        goto fail;

    fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
    if (!fgt->eof_out)
        goto fail;

    fgt->frame_queue_out = av_fifo_alloc2(1, sizeof(AVFrame*), AV_FIFO_FLAG_AUTO_GROW);
    if (!fgt->frame_queue_out)
        goto fail;

    return 0;

fail:
    fg_thread_uninit(fgt);
    return AVERROR(ENOMEM);
}
static int filter_thread(void *arg)
{
    FilterGraphPriv *fgp = arg;
    FilterGraph      *fg = &fgp->fg;

    FilterGraphThread fgt;
    int ret = 0, input_status = 0;

    ret = fg_thread_init(&fgt, fg);
    if (ret < 0)
        goto finish;

    fg_thread_set_name(fg);

    // if we have all input parameters the graph can now be configured
    if (ifilter_has_all_input_formats(fg)) {
        ret = configure_filtergraph(fg, &fgt);
        if (ret < 0) {
            av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
                   av_err2str(ret));
            goto finish;
        }
    }

    while (1) {
        InputFilter *ifilter;
        InputFilterPriv *ifp;
        enum FrameOpaque o;
        unsigned input_idx = fgt.next_in;

        input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
                                          &input_idx, fgt.frame);
        if (input_status == AVERROR_EOF) {
            av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
            break;
        } else if (input_status == AVERROR(EAGAIN)) {
            // should only happen when we didn't request any input
            av_assert0(input_idx == fg->nb_inputs);
            goto read_frames;
        }
        av_assert0(input_status >= 0);

        o = (intptr_t)fgt.frame->opaque;

        // message on the control stream
        if (input_idx == fg->nb_inputs) {
            FilterCommand *fc;

            av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);

            fc = (FilterCommand*)fgt.frame->buf[0]->data;
            send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
                         fc->all_filters);
            av_frame_unref(fgt.frame);
            continue;
        }

        // we received an input frame or EOF
        ifilter = fg->inputs[input_idx];
        ifp     = ifp_from_ifilter(ifilter);

        if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
            int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
            ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
                                  !fgt.graph);
        } else if (fgt.frame->buf[0]) {
            ret = send_frame(fg, &fgt, ifilter, fgt.frame);
        } else {
            av_assert1(o == FRAME_OPAQUE_EOF);
            ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
        }
        av_frame_unref(fgt.frame);
        if (ret == AVERROR_EOF) {
            av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
                   input_idx);
            sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
            continue;
        }
        if (ret < 0)
            goto finish;

read_frames:
        // retrieve all newly available frames
        ret = read_frames(fg, &fgt, fgt.frame);
        if (ret == AVERROR_EOF) {
            av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
            break;
        } else if (ret < 0) {
            av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
                   av_err2str(ret));
            goto finish;
        }
    }

    for (unsigned i = 0; i < fg->nb_outputs; i++) {
        OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[i]);

        if (fgt.eof_out[i] || !fgt.graph)
            continue;

        ret = fg_output_frame(ofp, &fgt, NULL);
        if (ret < 0)
            goto finish;
    }

finish:
    // EOF is normal termination
    if (ret == AVERROR_EOF)
        ret = 0;

    fg_thread_uninit(&fgt);

    return ret;
}
void fg_send_command(FilterGraph *fg, double time, const char *target,
                     const char *command, const char *arg, int all_filters)
{
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    AVBufferRef *buf;
    FilterCommand *fc;

    fc = av_mallocz(sizeof(*fc));
    if (!fc)
        return;

    buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
    if (!buf) {
        av_freep(&fc);
        return;
    }

    fc->target  = av_strdup(target);
    fc->command = av_strdup(command);
    fc->arg     = av_strdup(arg);
    if (!fc->target || !fc->command || !fc->arg) {
        av_buffer_unref(&buf);
        return;
    }

    fc->time        = time;
    fc->all_filters = all_filters;

    fgp->frame->buf[0] = buf;
    fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;

    sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
}
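
/* Usage sketch (illustrative, assuming a valid FilterGraph): send the
 * "volume" command with argument "0.5" to all volume filter instances
 * immediately:
 *     fg_send_command(fg, -1.0, "volume", "volume", "0.5", 1);
 * A non-negative time would instead queue the command to fire at that
 * timestamp. */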