[ArchLinux/community.git] / libopenshot / trunk / ffmpeg-4.0.patch
blob e4609d7c7962d79ff2b24068864c7f6dbe8ad659
1 diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
2 index 4af6cc9..63d543d 100644
3 --- a/cmake/Modules/FindFFmpeg.cmake
4 +++ b/cmake/Modules/FindFFmpeg.cmake
5 @@ -77,14 +77,14 @@ FIND_LIBRARY( SWSCALE_LIBRARY swscale swscale-2 swscale-4
6 $ENV{FFMPEGDIR}/lib/ffmpeg/
7 $ENV{FFMPEGDIR}/bin/ )
9 -#FindAvresample
10 -FIND_PATH( AVRESAMPLE_INCLUDE_DIR libavresample/avresample.h
11 +#FindSwresample
12 +FIND_PATH( SWRESAMPLE_INCLUDE_DIR libswresample/swresample.h
13 PATHS /usr/include/
14 /usr/include/ffmpeg/
15 $ENV{FFMPEGDIR}/include/
16 $ENV{FFMPEGDIR}/include/ffmpeg/ )
18 -FIND_LIBRARY( AVRESAMPLE_LIBRARY avresample avresample-2 avresample-3
19 +FIND_LIBRARY( SWRESAMPLE_LIBRARY swresample
20 PATHS /usr/lib/
21 /usr/lib/ffmpeg/
22 $ENV{FFMPEGDIR}/lib/
23 @@ -113,31 +113,31 @@ IF ( SWSCALE_INCLUDE_DIR AND SWSCALE_LIBRARY )
24 SET ( SWSCALE_FOUND TRUE )
25 ENDIF ( SWSCALE_INCLUDE_DIR AND SWSCALE_LIBRARY )
27 -IF ( AVRESAMPLE_INCLUDE_DIR AND AVRESAMPLE_LIBRARY )
28 - SET ( AVRESAMPLE_FOUND TRUE )
29 -ENDIF ( AVRESAMPLE_INCLUDE_DIR AND AVRESAMPLE_LIBRARY )
30 +IF ( SWRESAMPLE_INCLUDE_DIR AND SWRESAMPLE_LIBRARY )
31 + SET ( SWRESAMPLE_FOUND TRUE )
32 +ENDIF ( SWRESAMPLE_INCLUDE_DIR AND SWRESAMPLE_LIBRARY )
34 -IF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR AVRESAMPLE_FOUND )
35 +IF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR SWRESAMPLE_FOUND )
37 SET ( FFMPEG_FOUND TRUE )
39 SET ( FFMPEG_INCLUDE_DIR
40 - ${AVFORMAT_INCLUDE_DIR}
41 - ${AVCODEC_INCLUDE_DIR}
42 - ${AVUTIL_INCLUDE_DIR}
43 - ${AVDEVICE_INCLUDE_DIR}
44 - ${SWSCALE_INCLUDE_DIR}
45 - ${AVRESAMPLE_INCLUDE_DIR} )
47 + ${AVFORMAT_INCLUDE_DIR}
48 + ${AVCODEC_INCLUDE_DIR}
49 + ${AVUTIL_INCLUDE_DIR}
50 + ${AVDEVICE_INCLUDE_DIR}
51 + ${SWSCALE_INCLUDE_DIR}
52 + ${SWRESAMPLE_INCLUDE_DIR} )
54 SET ( FFMPEG_LIBRARIES
55 - ${AVFORMAT_LIBRARY}
56 - ${AVCODEC_LIBRARY}
57 - ${AVUTIL_LIBRARY}
58 - ${AVDEVICE_LIBRARY}
59 - ${SWSCALE_LIBRARY}
60 - ${AVRESAMPLE_LIBRARY} )
62 -ENDIF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR AVRESAMPLE_FOUND )
63 + ${AVFORMAT_LIBRARY}
64 + ${AVCODEC_LIBRARY}
65 + ${AVUTIL_LIBRARY}
66 + ${AVDEVICE_LIBRARY}
67 + ${SWSCALE_LIBRARY}
68 + ${SWRESAMPLE_LIBRARY} )
70 +ENDIF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR SWRESAMPLE_FOUND )
72 MARK_AS_ADVANCED(
73 FFMPEG_LIBRARY_DIR
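
This hunk only swaps the library that CMake detects: the libavresample search becomes a libswresample search, and the resulting include directory and library feed the aggregate FFMPEG_INCLUDE_DIR / FFMPEG_LIBRARIES variables used by the build. A minimal, hypothetical sanity check for a consumer that links the discovered SWRESAMPLE_LIBRARY (the build wiring and file name are assumptions, not part of the patch):

    // Prints the libswresample version the build actually linked against.
    // Assumes ${SWRESAMPLE_INCLUDE_DIR} is on the include path and
    // ${SWRESAMPLE_LIBRARY} is linked, as FindFFmpeg.cmake now provides.
    #include <cstdio>
    extern "C" {
    #include <libavutil/version.h>       // AV_VERSION_MAJOR/MINOR/MICRO
    #include <libswresample/swresample.h>
    }

    int main() {
        unsigned v = swresample_version();   // packed MAJOR.MINOR.MICRO
        std::printf("libswresample %u.%u.%u\n",
                    AV_VERSION_MAJOR(v), AV_VERSION_MINOR(v), AV_VERSION_MICRO(v));
        return 0;
    }
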
74 diff --git a/include/CrashHandler.h b/include/CrashHandler.h
75 index e3a4bbe..12c79a8 100644
76 --- a/include/CrashHandler.h
77 +++ b/include/CrashHandler.h
78 @@ -53,13 +53,15 @@ namespace openshot {
79 class CrashHandler {
80 private:
81 /// Default constructor
82 - CrashHandler(){}; // Don't allow user to create an instance of this singleton
83 + CrashHandler(){return;}; // Don't allow user to create an instance of this singleton
85 /// Default copy method
86 - CrashHandler(CrashHandler const&){}; // Don't allow the user to copy this instance
87 + //CrashHandler(CrashHandler const&){}; // Don't allow the user to copy this instance
88 + CrashHandler(CrashHandler const&) = delete; // Don't allow the user to copy this instance
90 /// Default assignment operator
91 - CrashHandler & operator=(CrashHandler const&){}; // Don't allow the user to assign this instance
92 + //CrashHandler & operator=(CrashHandler const&){}; // Don't allow the user to assign this instance
93 + CrashHandler & operator=(CrashHandler const&) = delete; // Don't allow the user to assign this instance
95 /// Private variable to keep track of singleton instance
96 static CrashHandler *m_pInstance;
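
The CrashHandler change replaces empty-bodied copy operations (which newer compilers warn about, and which silently permit copies of a class that is supposed to be a singleton) with C++11 "= delete". A minimal sketch of the resulting shape, simplified from the real class (the pointer-based m_pInstance bookkeeping is omitted here):

    // Deleted copy operations turn accidental copies of the singleton into
    // compile-time errors instead of relying on empty function bodies.
    class CrashHandlerLike {
    public:
        static CrashHandlerLike* Instance() {
            static CrashHandlerLike instance;   // simplified; the real class manages m_pInstance
            return &instance;
        }
        CrashHandlerLike(CrashHandlerLike const&) = delete;             // no copies
        CrashHandlerLike& operator=(CrashHandlerLike const&) = delete;  // no assignment
    private:
        CrashHandlerLike() {}    // only Instance() can construct
    };
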
97 diff --git a/include/FFmpegReader.h b/include/FFmpegReader.h
98 index 6072756..e2c4863 100644
99 --- a/include/FFmpegReader.h
100 +++ b/include/FFmpegReader.h
101 @@ -105,6 +105,7 @@ namespace openshot
102 bool check_interlace;
103 bool check_fps;
104 bool has_missing_frames;
105 + bool use_omp_threads;
107 CacheMemory working_cache;
108 CacheMemory missing_frames;
109 diff --git a/include/FFmpegUtilities.h b/include/FFmpegUtilities.h
110 index 578c658..346da54 100644
111 --- a/include/FFmpegUtilities.h
112 +++ b/include/FFmpegUtilities.h
113 @@ -43,7 +43,15 @@
114 #include <libavcodec/avcodec.h>
115 #include <libavformat/avformat.h>
116 #include <libswscale/swscale.h>
117 +	// Change this to the first version where swresample works
118 + #if (LIBAVFORMAT_VERSION_MAJOR >= 57)
119 + #define USE_SW
120 + #endif
121 + #ifdef USE_SW
122 + #include <libswresample/swresample.h>
123 + #else
124 #include <libavresample/avresample.h>
125 + #endif
126 #include <libavutil/mathematics.h>
127 #include <libavutil/pixfmt.h>
128 #include <libavutil/pixdesc.h>
129 @@ -106,7 +114,65 @@
130 #define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P
131 #endif
133 - #if IS_FFMPEG_3_2
134 + #ifdef USE_SW
135 + #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
136 + swr_convert(ctx, out, out_count, (const uint8_t **)in, in_count)
137 + #define SWR_ALLOC() swr_alloc()
138 + #define SWR_CLOSE(ctx) {}
139 + #define SWR_FREE(ctx) swr_free(ctx)
140 + #define SWR_INIT(ctx) swr_init(ctx)
141 + #define SWRCONTEXT SwrContext
142 + #else
143 + #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
144 + avresample_convert(ctx, out, linesize, out_count, (uint8_t **)in, linesize2, in_count)
145 + #define SWR_ALLOC() avresample_alloc_context()
146 + #define SWR_CLOSE(ctx) avresample_close(ctx)
147 + #define SWR_FREE(ctx) avresample_free(ctx)
148 + #define SWR_INIT(ctx) avresample_open(ctx)
149 + #define SWRCONTEXT AVAudioResampleContext
150 + #endif
153 + #if (LIBAVFORMAT_VERSION_MAJOR >= 58)
154 + #define AV_REGISTER_ALL
155 + #define AVCODEC_REGISTER_ALL
156 + #define AV_FILENAME url
157 + #define MY_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE
158 + #define AV_ALLOCATE_FRAME() av_frame_alloc()
159 + #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
160 + #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
161 + #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame)
162 + #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet)
163 + #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context)
164 + #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type
165 + #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id
166 + auto AV_GET_CODEC_CONTEXT = [](AVStream* av_stream, AVCodec* av_codec) { \
167 + AVCodecContext *context = avcodec_alloc_context3(av_codec); \
168 + avcodec_parameters_to_context(context, av_stream->codecpar); \
169 + return context; \
170 + };
171 + #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec;
172 + #define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in)
173 + #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar
174 + #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) (AVPixelFormat) av_stream->codecpar->format
175 + #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format
176 + #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) av_image_get_buffer_size(pix_fmt, width, height, 1)
177 + #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1)
178 + #define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path)
179 + #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
180 + #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) av_opt_set(priv_data, name, value, 0); avcodec_parameters_from_context(av_stream->codecpar, avcodec);
181 + #define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st) av_st = avformat_new_stream(oc, NULL);\
182 + if (!av_st) \
183 + throw OutOfMemory("Could not allocate memory for the video stream.", path); \
184 + c = avcodec_alloc_context3(av_codec); \
185 + st_codec = c; \
186 + av_st->codecpar->codec_id = av_codec->id;
187 + #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec);
188 + #elif IS_FFMPEG_3_2
189 + #define AV_REGISTER_ALL av_register_all();
190 + #define AVCODEC_REGISTER_ALL avcodec_register_all();
191 + #define AV_FILENAME filename
192 + #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
193 #define AV_ALLOCATE_FRAME() av_frame_alloc()
194 #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
195 #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
196 @@ -138,6 +204,10 @@
197 av_st->codecpar->codec_id = av_codec->id;
198 #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec);
199 #elif LIBAVFORMAT_VERSION_MAJOR >= 55
200 + #define AV_REGISTER_ALL av_register_all();
201 + #define AVCODEC_REGISTER_ALL avcodec_register_all();
202 + #define AV_FILENAME filename
203 + #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
204 #define AV_ALLOCATE_FRAME() av_frame_alloc()
205 #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
206 #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
207 @@ -164,6 +234,10 @@
208 c = av_st->codec;
209 #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec)
210 #else
211 + #define AV_REGISTER_ALL av_register_all();
212 + #define AVCODEC_REGISTER_ALL avcodec_register_all();
213 + #define AV_FILENAME filename
214 + #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
215 #define AV_ALLOCATE_FRAME() avcodec_alloc_frame()
216 #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
217 #define AV_RESET_FRAME(av_frame) avcodec_get_frame_defaults(av_frame)
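
The SWR_* macros above give the rest of the code one resampling vocabulary: with USE_SW defined they expand to libswresample calls, otherwise to the older libavresample calls. A hedged sketch of how a caller is expected to use them (a reduced variant of the call sites in FFmpegReader.cpp and FFmpegWriter.cpp below; error handling omitted, header path assumed):

    #include <cstdint>
    extern "C" {
    #include <libavutil/opt.h>        // av_opt_set_int()
    }
    #include "FFmpegUtilities.h"      // SWRCONTEXT, SWR_ALLOC, SWR_INIT, SWR_CONVERT, ...

    // Build a resample context the same way on both back ends.
    static SWRCONTEXT* make_resampler(int64_t layout,
                                      AVSampleFormat in_fmt, AVSampleFormat out_fmt,
                                      int sample_rate) {
        SWRCONTEXT* ctx = SWR_ALLOC();                       // swr_alloc() or avresample_alloc_context()
        av_opt_set_int(ctx, "in_channel_layout",  layout, 0);
        av_opt_set_int(ctx, "out_channel_layout", layout, 0);
        av_opt_set_int(ctx, "in_sample_fmt",  in_fmt,  0);
        av_opt_set_int(ctx, "out_sample_fmt", out_fmt, 0);
        av_opt_set_int(ctx, "in_sample_rate",  sample_rate, 0);
        av_opt_set_int(ctx, "out_sample_rate", sample_rate, 0);
        SWR_INIT(ctx);                                       // swr_init() or avresample_open()
        return ctx;
    }

    // Conversion and teardown go through the same macros:
    //   nb = SWR_CONVERT(ctx, out->data, out->linesize[0], out->nb_samples,
    //                    in->data, in->linesize[0], in->nb_samples);
    //   SWR_CLOSE(ctx); SWR_FREE(&ctx);
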
218 diff --git a/include/FFmpegWriter.h b/include/FFmpegWriter.h
219 index 8343002..7eefacb 100644
220 --- a/include/FFmpegWriter.h
221 +++ b/include/FFmpegWriter.h
222 @@ -174,8 +174,8 @@ namespace openshot
223 int initial_audio_input_frame_size;
224 int audio_input_position;
225 int audio_encoder_buffer_size;
226 - AVAudioResampleContext *avr;
227 - AVAudioResampleContext *avr_planar;
228 + SWRCONTEXT *avr;
229 + SWRCONTEXT *avr_planar;
231 /* Resample options */
232 int original_sample_rate;
233 diff --git a/include/Frame.h b/include/Frame.h
234 index a7ad509..eba7f8b 100644
235 --- a/include/Frame.h
236 +++ b/include/Frame.h
237 @@ -62,7 +62,7 @@
238 #include "AudioResampler.h"
239 #include "Fraction.h"
242 +#pragma SWIG nowarn=362
243 using namespace std;
245 namespace openshot
246 diff --git a/include/FrameMapper.h b/include/FrameMapper.h
247 index e70fdbc..043b5e4 100644
248 --- a/include/FrameMapper.h
249 +++ b/include/FrameMapper.h
250 @@ -146,7 +146,7 @@ namespace openshot
251 ReaderBase *reader; // The source video reader
252 CacheMemory final_cache; // Cache of actual Frame objects
253 bool is_dirty; // When this is true, the next call to GetFrame will re-init the mapping
254 - AVAudioResampleContext *avr; // Audio resampling context object
255 + SWRCONTEXT *avr; // Audio resampling context object
257 // Internal methods used by init
258 void AddField(int64_t frame);
259 diff --git a/include/OpenMPUtilities.h b/include/OpenMPUtilities.h
260 index 8a95a95..c0f5597 100644
261 --- a/include/OpenMPUtilities.h
262 +++ b/include/OpenMPUtilities.h
263 @@ -29,8 +29,26 @@
264 #define OPENSHOT_OPENMP_UTILITIES_H
266 #include <omp.h>
267 +#include <stdlib.h>
268 +#include <string.h>
270 - // Calculate the # of OpenMP Threads to allow
271 - #define OPEN_MP_NUM_PROCESSORS omp_get_num_procs()
272 +// Calculate the # of OpenMP Threads to allow
273 +#define OPEN_MP_NUM_PROCESSORS omp_get_num_procs()
275 +using namespace std;
277 +namespace openshot {
279 + // Check if OS2_OMP_THREADS environment variable is present, and return
280 + // if multiple threads should be used with OMP
281 + static bool IsOMPEnabled() {
282 + char* OS2_OMP_THREADS = getenv("OS2_OMP_THREADS");
283 + if (OS2_OMP_THREADS != NULL && strcmp(OS2_OMP_THREADS, "0") == 0)
284 + return false;
285 + else
286 + return true;
291 #endif
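
IsOMPEnabled() reads the OS2_OMP_THREADS environment variable and reports false only when it is set to the literal string "0"; FFmpegReader::Open() stores the result in use_omp_threads and, as the FFmpegReader.cpp hunks below show, waits on each OpenMP task when threading is disabled. A small, hypothetical driver showing the intended effect (the function and loop are illustrative, not taken from the library):

    #include "OpenMPUtilities.h"    // openshot::IsOMPEnabled(), OPEN_MP_NUM_PROCESSORS

    void decode_all_packets(int packet_count) {
        bool use_omp_threads = openshot::IsOMPEnabled();   // false only if OS2_OMP_THREADS == "0"
        #pragma omp parallel
        #pragma omp single
        for (int i = 0; i < packet_count; ++i) {
            #pragma omp task
            { /* decode packet i */ }

            if (!use_omp_threads) {
                // Serialize the pipeline: wait for this task before queuing the next.
                #pragma omp taskwait
            }
        }
    }

Running with OS2_OMP_THREADS=0 therefore trades throughput for stability, matching the comment added to ReadStream() below.
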
292 diff --git a/include/ZmqLogger.h b/include/ZmqLogger.h
293 index c134f2c..e825ed0 100644
294 --- a/include/ZmqLogger.h
295 +++ b/include/ZmqLogger.h
296 @@ -72,11 +72,19 @@ namespace openshot {
297 /// Default constructor
298 ZmqLogger(){}; // Don't allow user to create an instance of this singleton
300 +#if __GNUC__ >=7
301 /// Default copy method
302 - ZmqLogger(ZmqLogger const&){}; // Don't allow the user to copy this instance
303 + ZmqLogger(ZmqLogger const&) = delete; // Don't allow the user to copy this instance
305 /// Default assignment operator
306 - ZmqLogger & operator=(ZmqLogger const&){}; // Don't allow the user to assign this instance
307 + ZmqLogger & operator=(ZmqLogger const&) = delete; // Don't allow the user to assign this instance
308 +#else
309 + /// Default copy method
310 + ZmqLogger(ZmqLogger const&) {}; // Don't allow the user to copy this instance
312 + /// Default assignment operator
313 + ZmqLogger & operator=(ZmqLogger const&); // Don't allow the user to assign this instance
314 +#endif
316 /// Private variable to keep track of singleton instance
317 static ZmqLogger * m_pInstance;
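
Unlike CrashHandler, ZmqLogger only uses "= delete" when building with GCC 7 or newer; older compilers keep an empty-bodied copy constructor and a declared-but-undefined assignment operator. A sketch of the practical difference (the class name is illustrative):

    class LoggerLike {
    public:
        static LoggerLike* Instance();
    private:
        LoggerLike() {}
    #if __GNUC__ >= 7
        // Misuse is rejected at compile time.
        LoggerLike(LoggerLike const&) = delete;
        LoggerLike& operator=(LoggerLike const&) = delete;
    #else
        // Copying compiles but does nothing; assignment only fails at link time,
        // because the operator is declared here and never defined anywhere.
        LoggerLike(LoggerLike const&) {}
        LoggerLike& operator=(LoggerLike const&);
    #endif
    };
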
318 diff --git a/src/Clip.cpp b/src/Clip.cpp
319 index 913fd71..63e7741 100644
320 --- a/src/Clip.cpp
321 +++ b/src/Clip.cpp
322 @@ -925,13 +925,14 @@ void Clip::SetJsonValue(Json::Value root) {
324 if (!existing_effect["type"].isNull()) {
325 // Create instance of effect
326 - e = EffectInfo().CreateEffect(existing_effect["type"].asString());
327 + if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {
329 - // Load Json into Effect
330 - e->SetJsonValue(existing_effect);
331 + // Load Json into Effect
332 + e->SetJsonValue(existing_effect);
334 - // Add Effect to Timeline
335 - AddEffect(e);
336 + // Add Effect to Timeline
337 + AddEffect(e);
342 diff --git a/src/EffectInfo.cpp b/src/EffectInfo.cpp
343 index 23bc9d0..f9e4c40 100644
344 --- a/src/EffectInfo.cpp
345 +++ b/src/EffectInfo.cpp
346 @@ -82,6 +82,7 @@ EffectBase* EffectInfo::CreateEffect(string effect_type) {
348 else if (effect_type == "Wave")
349 return new Wave();
350 + return NULL;
353 // Generate Json::JsonValue for this object
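
CreateEffect() previously fell off the end of the function for unknown effect types; it now returns NULL, and the Clip.cpp hunk above plus the Timeline.cpp hunks below guard on that return value. The caller-side pattern, spelled out as a small self-contained helper (project header names assumed for illustration):

    #include "EffectInfo.h"      // openshot::EffectInfo::CreateEffect()
    #include "EffectBase.h"

    using namespace openshot;

    // Returns the created effect, or NULL when the type string is unknown.
    EffectBase* create_and_configure(Json::Value existing_effect) {
        EffectBase* e = EffectInfo().CreateEffect(existing_effect["type"].asString());
        if (e) {
            // Only touch the effect if CreateEffect() recognised the type;
            // previously an unknown type left callers with an indeterminate pointer.
            e->SetJsonValue(existing_effect);
        }
        return e;
    }
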
354 diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp
355 index 5a4c936..3e29cf7 100644
356 --- a/src/FFmpegReader.cpp
357 +++ b/src/FFmpegReader.cpp
358 @@ -37,11 +37,12 @@ FFmpegReader::FFmpegReader(string path)
359 audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
360 check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
361 prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
362 - current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0), packet(NULL) {
363 + current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
364 + packet(NULL), use_omp_threads(true) {
366 // Initialize FFMpeg, and register all formats and codecs
367 - av_register_all();
368 - avcodec_register_all();
369 + AV_REGISTER_ALL
370 + AVCODEC_REGISTER_ALL
372 // Init cache
373 working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
374 @@ -58,11 +59,12 @@ FFmpegReader::FFmpegReader(string path, bool inspect_reader)
375 audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
376 check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
377 prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
378 - current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0), packet(NULL) {
379 + current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
380 + packet(NULL), use_omp_threads(true) {
382 // Initialize FFMpeg, and register all formats and codecs
383 - av_register_all();
384 - avcodec_register_all();
385 + AV_REGISTER_ALL
386 + AVCODEC_REGISTER_ALL
388 // Init cache
389 working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
390 @@ -227,6 +229,9 @@ void FFmpegReader::Open()
391 missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
392 final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
394 + // Initialize OMP threading support
395 + use_omp_threads = openshot::IsOMPEnabled();
397 // Mark as "open"
398 is_open = true;
400 @@ -606,6 +611,12 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
402 // Process Video Packet
403 ProcessVideoPacket(requested_frame);
405 + if (!use_omp_threads) {
406 + // Wait on each OMP task to complete before moving on to the next one. This slows
407 + // down processing considerably, but might be more stable on some systems.
408 + #pragma omp taskwait
413 @@ -638,7 +649,6 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
416 // Check if working frames are 'finished'
417 - bool is_cache_found = false;
418 if (!is_seeking) {
419 // Check for any missing frames
420 CheckMissingFrame(requested_frame);
421 @@ -648,7 +658,7 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
424 // Check if requested 'final' frame is available
425 - is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
426 + bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
428 // Increment frames processed
429 packets_processed++;
430 @@ -978,7 +988,7 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
431 int data_size = 0;
433 // re-initialize buffer size (it gets changed in the avcodec_decode_audio2 method call)
434 - int buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
435 + int buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE;
436 #pragma omp critical (ProcessAudioPacket)
438 #if IS_FFMPEG_3_2
439 @@ -1083,7 +1093,7 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
442 // Allocate audio buffer
443 - int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
444 + int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE];
446 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (ReSample)", "packet_samples", packet_samples, "info.channels", info.channels, "info.sample_rate", info.sample_rate, "aCodecCtx->sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), "AV_SAMPLE_FMT_S16", AV_SAMPLE_FMT_S16, "", -1);
448 @@ -1093,11 +1103,11 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
449 audio_converted->nb_samples = audio_frame->nb_samples;
450 av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
452 - AVAudioResampleContext *avr = NULL;
453 + SWRCONTEXT *avr = NULL;
454 int nb_samples = 0;
456 // setup resample context
457 - avr = avresample_alloc_context();
458 + avr = SWR_ALLOC();
459 av_opt_set_int(avr, "in_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
460 av_opt_set_int(avr, "out_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
461 av_opt_set_int(avr, "in_sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), 0);
462 @@ -1106,10 +1116,10 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
463 av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
464 av_opt_set_int(avr, "in_channels", info.channels, 0);
465 av_opt_set_int(avr, "out_channels", info.channels, 0);
466 - int r = avresample_open(avr);
467 + int r = SWR_INIT(avr);
469 // Convert audio samples
470 - nb_samples = avresample_convert(avr, // audio resample context
471 + nb_samples = SWR_CONVERT(avr, // audio resample context
472 audio_converted->data, // output data pointers
473 audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
474 audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
475 @@ -1121,8 +1131,8 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
476 memcpy(audio_buf, audio_converted->data[0], audio_converted->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels);
478 // Deallocate resample buffer
479 - avresample_close(avr);
480 - avresample_free(&avr);
481 + SWR_CLOSE(avr);
482 + SWR_FREE(&avr);
483 avr = NULL;
485 // Free AVFrames
486 @@ -1348,7 +1358,7 @@ void FFmpegReader::Seek(int64_t requested_frame)
488 seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
489 if (av_seek_frame(pFormatCtx, info.video_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
490 - fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->filename);
491 + fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);
492 } else
494 // VIDEO SEEK
495 @@ -1362,7 +1372,7 @@ void FFmpegReader::Seek(int64_t requested_frame)
497 seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);
498 if (av_seek_frame(pFormatCtx, info.audio_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
499 - fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->filename);
500 + fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);
501 } else
503 // AUDIO SEEK
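
The patched seek error messages work because AV_FILENAME is a plain token, not a string: the preprocessor substitutes a member name, so the same source line reads AVFormatContext::url on FFmpeg 4.0+ and AVFormatContext::filename on older releases. A reduced illustration (the guard values mirror the FFmpegUtilities.h definitions above):

    #include <cstdio>
    extern "C" {
    #include <libavformat/avformat.h>
    }

    #if (LIBAVFORMAT_VERSION_MAJOR >= 58)
      #define AV_FILENAME url        // FFmpeg 4.0+: AVFormatContext::url
    #else
      #define AV_FILENAME filename   // older releases: AVFormatContext::filename
    #endif

    void report_seek_error(AVFormatContext* pFormatCtx) {
        // Expands to pFormatCtx->url or pFormatCtx->filename.
        std::fprintf(stderr, "%s: error while seeking\n", pFormatCtx->AV_FILENAME);
    }
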
504 diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
505 index 4416040..d106901 100644
506 --- a/src/FFmpegWriter.cpp
507 +++ b/src/FFmpegWriter.cpp
508 @@ -46,7 +46,7 @@ FFmpegWriter::FFmpegWriter(string path) :
509 info.has_video = false;
511 // Initialize FFMpeg, and register all formats and codecs
512 - av_register_all();
513 + AV_REGISTER_ALL
515 // auto detect format
516 auto_detect_format();
517 @@ -299,7 +299,7 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value)
518 /// Determine if codec name is valid
519 bool FFmpegWriter::IsValidCodec(string codec_name) {
520 // Initialize FFMpeg, and register all formats and codecs
521 - av_register_all();
522 + AV_REGISTER_ALL
524 // Find the codec (if any)
525 if (avcodec_find_encoder_by_name(codec_name.c_str()) == NULL)
526 @@ -342,7 +342,7 @@ void FFmpegWriter::WriteHeader()
529 // Force the output filename (which doesn't always happen for some reason)
530 - snprintf(oc->filename, sizeof(oc->filename), "%s", path.c_str());
531 + snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", path.c_str());
533 // Write the stream header, if any
534 // TODO: add avoptions / parameters instead of NULL
535 @@ -559,8 +559,10 @@ void FFmpegWriter::flush_encoders()
537 if (info.has_audio && audio_codec && AV_GET_CODEC_TYPE(audio_st) == AVMEDIA_TYPE_AUDIO && AV_GET_CODEC_ATTRIBUTES(audio_st, audio_codec)->frame_size <= 1)
538 return;
539 +#if (LIBAVFORMAT_VERSION_MAJOR < 58)
540 if (info.has_video && video_codec && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && (oc->oformat->flags & AVFMT_RAWPICTURE) && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO)
541 return;
542 +#endif
544 int error_code = 0;
545 int stop_encoding = 1;
546 @@ -734,14 +736,14 @@ void FFmpegWriter::close_audio(AVFormatContext *oc, AVStream *st)
548 // Deallocate resample buffer
549 if (avr) {
550 - avresample_close(avr);
551 - avresample_free(&avr);
552 + SWR_CLOSE(avr);
553 + SWR_FREE(&avr);
554 avr = NULL;
557 if (avr_planar) {
558 - avresample_close(avr_planar);
559 - avresample_free(&avr_planar);
560 + SWR_CLOSE(avr_planar);
561 + SWR_FREE(&avr_planar);
562 avr_planar = NULL;
565 @@ -881,7 +883,11 @@ AVStream* FFmpegWriter::add_audio_stream()
567 // some formats want stream headers to be separate
568 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
569 +#if (LIBAVCODEC_VERSION_MAJOR >= 57)
570 + c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
571 +#else
572 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
573 +#endif
575 AV_COPY_PARAMS_FROM_CONTEXT(st, c);
576 ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_audio_stream", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->channels", c->channels, "c->sample_fmt", c->sample_fmt, "c->channel_layout", c->channel_layout, "c->sample_rate", c->sample_rate);
577 @@ -953,7 +959,11 @@ AVStream* FFmpegWriter::add_video_stream()
578 c->mb_decision = 2;
579 // some formats want stream headers to be separate
580 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
581 +#if (LIBAVCODEC_VERSION_MAJOR >= 57)
582 + c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
583 +#else
584 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
585 +#endif
587 // Find all supported pixel formats for this codec
588 const PixelFormat* supported_pixel_formats = codec->pix_fmts;
589 @@ -970,10 +980,12 @@ AVStream* FFmpegWriter::add_video_stream()
590 // Raw video should use RGB24
591 c->pix_fmt = PIX_FMT_RGB24;
593 +#if (LIBAVFORMAT_VERSION_MAJOR < 58)
594 if (strcmp(fmt->name, "gif") != 0)
595 // If not GIF format, skip the encoding process
596 // Set raw picture flag (so we don't encode this video)
597 oc->oformat->flags |= AVFMT_RAWPICTURE;
598 +#endif
599 } else {
600 // Set the default codec
601 c->pix_fmt = PIX_FMT_YUV420P;
602 @@ -981,7 +993,11 @@ AVStream* FFmpegWriter::add_video_stream()
605 AV_COPY_PARAMS_FROM_CONTEXT(st, c);
606 +#if (LIBAVFORMAT_VERSION_MAJOR < 58)
607 ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_video_stream (" + (string)fmt->name + " : " + (string)av_get_pix_fmt_name(c->pix_fmt) + ")", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->pix_fmt", c->pix_fmt, "oc->oformat->flags", oc->oformat->flags, "AVFMT_RAWPICTURE", AVFMT_RAWPICTURE, "", -1);
608 +#else
609 + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_video_stream (" + (string)fmt->name + " : " + (string)av_get_pix_fmt_name(c->pix_fmt) + ")", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->pix_fmt", c->pix_fmt, "oc->oformat->flags", oc->oformat->flags, "", -1, "", -1);
610 +#endif
612 return st;
614 @@ -1056,7 +1072,7 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st)
615 av_dict_set(&st->metadata, iter->first.c_str(), iter->second.c_str(), 0);
618 - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE, "", -1, "", -1, "", -1);
619 + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE, "", -1, "", -1, "", -1);
623 @@ -1222,7 +1238,7 @@ void FFmpegWriter::write_audio_packets(bool final)
625 // setup resample context
626 if (!avr) {
627 - avr = avresample_alloc_context();
628 + avr = SWR_ALLOC();
629 av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
630 av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
631 av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
632 @@ -1231,12 +1247,12 @@ void FFmpegWriter::write_audio_packets(bool final)
633 av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
634 av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
635 av_opt_set_int(avr, "out_channels", info.channels, 0);
636 - avresample_open(avr);
637 + SWR_INIT(avr);
639 int nb_samples = 0;
641 // Convert audio samples
642 - nb_samples = avresample_convert(avr, // audio resample context
643 + nb_samples = SWR_CONVERT(avr, // audio resample context
644 audio_converted->data, // output data pointers
645 audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
646 audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
647 @@ -1297,7 +1313,7 @@ void FFmpegWriter::write_audio_packets(bool final)
649 // setup resample context
650 if (!avr_planar) {
651 - avr_planar = avresample_alloc_context();
652 + avr_planar = SWR_ALLOC();
653 av_opt_set_int(avr_planar, "in_channel_layout", info.channel_layout, 0);
654 av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0);
655 av_opt_set_int(avr_planar, "in_sample_fmt", output_sample_fmt, 0);
656 @@ -1306,7 +1322,7 @@ void FFmpegWriter::write_audio_packets(bool final)
657 av_opt_set_int(avr_planar, "out_sample_rate", info.sample_rate, 0);
658 av_opt_set_int(avr_planar, "in_channels", info.channels, 0);
659 av_opt_set_int(avr_planar, "out_channels", info.channels, 0);
660 - avresample_open(avr_planar);
661 + SWR_INIT(avr_planar);
664 // Create input frame (and allocate arrays)
665 @@ -1329,7 +1345,7 @@ void FFmpegWriter::write_audio_packets(bool final)
666 av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, frame_final->nb_samples, audio_codec->sample_fmt, 0);
668 // Convert audio samples
669 - int nb_samples = avresample_convert(avr_planar, // audio resample context
670 + int nb_samples = SWR_CONVERT(avr_planar, // audio resample context
671 frame_final->data, // output data pointers
672 frame_final->linesize[0], // output plane size, in bytes. (0 if unknown)
673 frame_final->nb_samples, // maximum number of samples that the output buffer can hold
674 @@ -1560,6 +1576,9 @@ void FFmpegWriter::process_video_packet(std::shared_ptr<Frame> frame)
675 // write video frame
676 bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame* frame_final)
678 +#if (LIBAVFORMAT_VERSION_MAJOR >= 58)
679 + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet", "frame->number", frame->number, "oc->oformat->flags", oc->oformat->flags, "", -1, "", -1, "", -1, "", -1);
680 +#else
681 ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet", "frame->number", frame->number, "oc->oformat->flags & AVFMT_RAWPICTURE", oc->oformat->flags & AVFMT_RAWPICTURE, "", -1, "", -1, "", -1, "", -1);
683 if (oc->oformat->flags & AVFMT_RAWPICTURE) {
684 @@ -1587,7 +1606,9 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame* fra
685 // Deallocate packet
686 AV_FREE_PACKET(&pkt);
688 - } else {
689 + } else
690 +#endif
693 AVPacket pkt;
694 av_init_packet(&pkt);
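
Two recurring guards in FFmpegWriter.cpp are worth reading together: CODEC_FLAG_GLOBAL_HEADER was renamed to AV_CODEC_FLAG_GLOBAL_HEADER (available from libavcodec 57), and AVFMT_RAWPICTURE disappeared in libavformat 58, which is why the raw-picture branches are compiled out above. A condensed sketch of the first guard as used in add_audio_stream() / add_video_stream():

    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    }

    // Ask the encoder to emit global headers when the container needs them,
    // using whichever flag name this libavcodec version provides.
    static void request_global_header(AVFormatContext* oc, AVCodecContext* c) {
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    #if (LIBAVCODEC_VERSION_MAJOR >= 57)
            c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;   // modern name
    #else
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;      // pre-57 name
    #endif
    }
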
695 diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp
696 index f49cbc4..c4c68f5 100644
697 --- a/src/FrameMapper.cpp
698 +++ b/src/FrameMapper.cpp
699 @@ -650,8 +650,8 @@ void FrameMapper::Close()
701 // Deallocate resample buffer
702 if (avr) {
703 - avresample_close(avr);
704 - avresample_free(&avr);
705 + SWR_CLOSE(avr);
706 + SWR_FREE(&avr);
707 avr = NULL;
710 @@ -741,8 +741,8 @@ void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldow
712 // Deallocate resample buffer
713 if (avr) {
714 - avresample_close(avr);
715 - avresample_free(&avr);
716 + SWR_CLOSE(avr);
717 + SWR_FREE(&avr);
718 avr = NULL;
721 @@ -817,7 +817,7 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
723 // setup resample context
724 if (!avr) {
725 - avr = avresample_alloc_context();
726 + avr = SWR_ALLOC();
727 av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
728 av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
729 av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
730 @@ -826,11 +826,11 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
731 av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
732 av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
733 av_opt_set_int(avr, "out_channels", info.channels, 0);
734 - avresample_open(avr);
735 + SWR_INIT(avr);
738 // Convert audio samples
739 - nb_samples = avresample_convert(avr, // audio resample context
740 + nb_samples = SWR_CONVERT(avr, // audio resample context
741 audio_converted->data, // output data pointers
742 audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
743 audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
744 diff --git a/src/Timeline.cpp b/src/Timeline.cpp
745 index 1b4f475..ed3c3df 100644
746 --- a/src/Timeline.cpp
747 +++ b/src/Timeline.cpp
748 @@ -1000,13 +1000,14 @@ void Timeline::SetJsonValue(Json::Value root) {
750 if (!existing_effect["type"].isNull()) {
751 // Create instance of effect
752 - e = EffectInfo().CreateEffect(existing_effect["type"].asString());
753 + if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {
755 - // Load Json into Effect
756 - e->SetJsonValue(existing_effect);
757 + // Load Json into Effect
758 + e->SetJsonValue(existing_effect);
760 - // Add Effect to Timeline
761 - AddEffect(e);
762 + // Add Effect to Timeline
763 + AddEffect(e);
768 @@ -1270,13 +1271,14 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef
769 EffectBase *e = NULL;
771 // Init the matching effect object
772 - e = EffectInfo().CreateEffect(effect_type);
773 + if (e = EffectInfo().CreateEffect(effect_type)) {
775 - // Load Json into Effect
776 - e->SetJsonValue(change["value"]);
777 + // Load Json into Effect
778 + e->SetJsonValue(change["value"]);
780 - // Add Effect to Timeline
781 - AddEffect(e);
782 + // Add Effect to Timeline
783 + AddEffect(e);
786 } else if (change_type == "update") {
788 diff --git a/tests/ReaderBase_Tests.cpp b/tests/ReaderBase_Tests.cpp
789 index 9d43530..70ca90d 100644
790 --- a/tests/ReaderBase_Tests.cpp
791 +++ b/tests/ReaderBase_Tests.cpp
792 @@ -44,9 +44,9 @@ TEST(ReaderBase_Derived_Class)
793 std::shared_ptr<Frame> GetFrame(int64_t number) { std::shared_ptr<Frame> f(new Frame()); return f; }
794 void Close() { };
795 void Open() { };
796 - string Json() { };
797 + string Json() { return NULL; };
798 void SetJson(string value) { };
799 - Json::Value JsonValue() { };
800 + Json::Value JsonValue() { return (int) NULL; };
801 void SetJsonValue(Json::Value root) { };
802 bool IsOpen() { return true; };
803 string Name() { return "TestReader"; };
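
The two stub changes in the test reader exist to satisfy return-type warnings: the derived class previously had non-void functions with no return statement. Note that returning NULL from a std::string function and (int) NULL from a Json::Value function is fragile (constructing std::string from a null pointer is undefined behaviour); a safer, hypothetical variant of the same stubs would return empty values instead:

    string Json() { return ""; };                        // empty JSON string
    Json::Value JsonValue() { return Json::Value(); };   // empty JSON value
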