2 * An example showing how to play a stream sync'd to video, using ffmpeg.
7 #include <condition_variable>
31 #include "libavcodec/avcodec.h"
32 #include "libavformat/avformat.h"
33 #include "libavformat/avio.h"
34 #include "libavformat/version.h"
35 #include "libavutil/avutil.h"
36 #include "libavutil/error.h"
37 #include "libavutil/frame.h"
38 #include "libavutil/mem.h"
39 #include "libavutil/pixfmt.h"
40 #include "libavutil/rational.h"
41 #include "libavutil/samplefmt.h"
42 #include "libavutil/time.h"
43 #include "libavutil/version.h"
44 #include "libavutil/channel_layout.h"
45 #include "libswscale/swscale.h"
46 #include "libswresample/swresample.h"
57 #include "common/alhelpers.h"
60 /* Undefine this to disable use of experimental extensions. Don't use for
61 * production code! Interfaces and behavior may change prior to being
64 #define ALLOW_EXPERIMENTAL_EXTS
66 #ifdef ALLOW_EXPERIMENTAL_EXTS
/* Fallback declarations for the AL_SOFT_map_buffer extension, for building
 * against headers that don't provide it.
 * NOTE(review): the matching #endif for this #ifndef lies outside the visible
 * span of this file. */
#ifndef AL_SOFT_map_buffer
#define AL_SOFT_map_buffer 1
/* Bitfield type used for the map/storage access flags below. */
typedef unsigned int ALbitfieldSOFT;
/* Access flags accepted by the storage/map entry points. */
#define AL_MAP_READ_BIT_SOFT 0x00000001
#define AL_MAP_WRITE_BIT_SOFT 0x00000002
#define AL_MAP_PERSISTENT_BIT_SOFT 0x00000004
#define AL_PRESERVE_DATA_BIT_SOFT 0x00000008
/* Function pointer typedefs for the extension's entry points (resolved at
 * runtime elsewhere in this file). */
typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format,
    const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset,
    ALsizei length, ALbitfieldSOFT access);
typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer,
    ALsizei offset, ALsizei length);
/* Fallback declarations for the AL_SOFT_events extension, for building
 * against headers that don't provide it.
 * NOTE(review): the matching #endif for this #ifndef lies outside the visible
 * span of this file. */
#ifndef AL_SOFT_events
#define AL_SOFT_events 1
/* Properties for querying the installed callback and its user pointer. */
#define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x1220
#define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x1221
/* Event types that can be enabled/disabled and delivered to the callback. */
#define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x1222
#define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x1223
#define AL_EVENT_TYPE_ERROR_SOFT 0x1224
#define AL_EVENT_TYPE_PERFORMANCE_SOFT 0x1225
#define AL_EVENT_TYPE_DEPRECATED_SOFT 0x1226
#define AL_EVENT_TYPE_DISCONNECTED_SOFT 0x1227
90 typedef void (AL_APIENTRY
*ALEVENTPROCSOFT
)(ALenum eventType
, ALuint object
, ALuint param
,
91 ALsizei length
, const ALchar
*message
,
/* Function pointer typedefs for the event-extension entry points (resolved at
 * runtime elsewhere in this file). */
/* Enables or disables delivery of `count` event types from `types`. */
typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count,
    const ALenum *types, ALboolean enable);
/* Installs `callback` with an opaque `userParam` passed back on each event. */
typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback,
    void *userParam);
/* Pointer-valued property queries. */
typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname);
typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
/* Fallback declarations for the AL_SOFT_bformat_ex extension (ambisonic
 * layout/scaling control on B-Format buffers).
 * NOTE(review): the matching #endif for this #ifndef lies outside the visible
 * span of this file. */
#ifndef AL_SOFT_bformat_ex
#define AL_SOFT_bformat_ex
/* Buffer properties selecting the ambisonic channel order and normalization. */
#define AL_AMBISONIC_LAYOUT_SOFT 0x1997
#define AL_AMBISONIC_SCALING_SOFT 0x1998
/* Channel-ordering values (FuMa vs ACN). */
#define AL_FUMA_SOFT 0x0000
#define AL_ACN_SOFT 0x0001
/* Normalization values: FuMa (0x0000 above), SN3D, N3D. */
#define AL_SN3D_SOFT 0x0001
#define AL_N3D_SOFT 0x0002
108 #endif /* ALLOW_EXPERIMENTAL_EXTS */
/* User-defined literal for int64_t constants, e.g. `1_i64 << 32`, avoiding
 * implementation-defined suffixes like LL for the exact-width type. */
inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept
{
    const auto value = static_cast<int64_t>(n);
    return value;
}
116 #define M_PI (3.14159265358979323846)
/* 32.32 fixed-point seconds: int64_t ticks of 1/2^32 second each. Used for
 * the fixed-point sample offsets OpenAL reports (see the uses of `fixed32`
 * with AL_SAMPLE_OFFSET_LATENCY_SOFT further down). */
using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1_i64<<32)>>;
/* Local shorthand for the std::chrono duration types used throughout. */
using nanoseconds = std::chrono::nanoseconds;
using microseconds = std::chrono::microseconds;
using milliseconds = std::chrono::milliseconds;
using seconds = std::chrono::seconds;
/* Seconds with a double representation, for fractional-second math. */
using seconds_d64 = std::chrono::duration<double>;
/* Application name, used for window titling and the like. */
const std::string AppName{"alffplay"};

/* Runtime options; presumably set from the command line — the parsing code is
 * outside this span (TODO confirm). */
bool EnableDirectOut{false};
bool EnableWideStereo{false};
bool DisableVideo{false};
/* Extension entry points, null until resolved at runtime. Code below checks
 * these for null before use. */
LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
134 #ifdef AL_SOFT_map_buffer
135 LPALBUFFERSTORAGESOFT alBufferStorageSOFT
;
136 LPALMAPBUFFERSOFT alMapBufferSOFT
;
137 LPALUNMAPBUFFERSOFT alUnmapBufferSOFT
;
140 #ifdef AL_SOFT_events
141 LPALEVENTCONTROLSOFT alEventControlSOFT
;
142 LPALEVENTCALLBACKSOFT alEventCallbackSOFT
;
/* If the clock difference exceeds this in either direction, the sync is
 * considered hopeless and the accumulated average is reset. */
const seconds AVNoSyncThreshold{10};

/* Max video frame lateness tolerated before corrective action. */
const milliseconds VideoSyncThreshold{10};
#define VIDEO_PICTURE_QUEUE_SIZE 24

/* Audio drift below this (halved on the early side, see getSync) needs no
 * correction. */
const seconds_d64 AudioSyncThreshold{0.03};
/* Cap on the per-update audio correction, to avoid large skips. */
const milliseconds AudioSampleCorrectionMax{50};
/* Averaging filter coefficient for audio sync. */
#define AUDIO_DIFF_AVG_NB 20
const double AudioAvgFilterCoeff{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB)};
/* Per-buffer size, in time */
const milliseconds AudioBufferTime{20};
/* Buffer total size, in time (should be divisible by the buffer time) */
const milliseconds AudioBufferTotalTime{800};
161 FF_MOVIE_DONE_EVENT
= SDL_USEREVENT
164 enum class SyncMaster
{
173 inline microseconds
get_avtime()
174 { return microseconds
{av_gettime()}; }
176 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
177 struct AVIOContextDeleter
{
178 void operator()(AVIOContext
*ptr
) { avio_closep(&ptr
); }
180 using AVIOContextPtr
= std::unique_ptr
<AVIOContext
,AVIOContextDeleter
>;
182 struct AVFormatCtxDeleter
{
183 void operator()(AVFormatContext
*ptr
) { avformat_close_input(&ptr
); }
185 using AVFormatCtxPtr
= std::unique_ptr
<AVFormatContext
,AVFormatCtxDeleter
>;
187 struct AVCodecCtxDeleter
{
188 void operator()(AVCodecContext
*ptr
) { avcodec_free_context(&ptr
); }
190 using AVCodecCtxPtr
= std::unique_ptr
<AVCodecContext
,AVCodecCtxDeleter
>;
192 struct AVFrameDeleter
{
193 void operator()(AVFrame
*ptr
) { av_frame_free(&ptr
); }
195 using AVFramePtr
= std::unique_ptr
<AVFrame
,AVFrameDeleter
>;
197 struct SwrContextDeleter
{
198 void operator()(SwrContext
*ptr
) { swr_free(&ptr
); }
200 using SwrContextPtr
= std::unique_ptr
<SwrContext
,SwrContextDeleter
>;
202 struct SwsContextDeleter
{
203 void operator()(SwsContext
*ptr
) { sws_freeContext(ptr
); }
205 using SwsContextPtr
= std::unique_ptr
<SwsContext
,SwsContextDeleter
>;
208 template<size_t SizeLimit
>
211 std::condition_variable mCondVar
;
212 std::deque
<AVPacket
> mPackets
;
213 size_t mTotalSize
{0};
214 bool mFinished
{false};
216 AVPacket
*getPacket(std::unique_lock
<std::mutex
> &lock
)
218 while(mPackets
.empty() && !mFinished
)
220 return mPackets
.empty() ? nullptr : &mPackets
.front();
225 AVPacket
*pkt
= &mPackets
.front();
226 mTotalSize
-= static_cast<unsigned int>(pkt
->size
);
227 av_packet_unref(pkt
);
228 mPackets
.pop_front();
234 for(AVPacket
&pkt
: mPackets
)
235 av_packet_unref(&pkt
);
240 int sendTo(AVCodecContext
*codecctx
)
242 std::unique_lock
<std::mutex
> lock
{mMutex
};
244 AVPacket
*pkt
{getPacket(lock
)};
245 if(!pkt
) return avcodec_send_packet(codecctx
, nullptr);
247 const int ret
{avcodec_send_packet(codecctx
, pkt
)};
248 if(ret
!= AVERROR(EAGAIN
))
251 std::cerr
<< "Failed to send packet: "<<ret
<<std::endl
;
260 std::lock_guard
<std::mutex
> _
{mMutex
};
263 mCondVar
.notify_one();
266 bool put(const AVPacket
*pkt
)
269 std::unique_lock
<std::mutex
> lock
{mMutex
};
270 if(mTotalSize
>= SizeLimit
)
273 mPackets
.push_back(AVPacket
{});
274 if(av_packet_ref(&mPackets
.back(), pkt
) != 0)
280 mTotalSize
+= static_cast<unsigned int>(mPackets
.back().size
);
282 mCondVar
.notify_one();
293 AVStream
*mStream
{nullptr};
294 AVCodecCtxPtr mCodecCtx
;
296 PacketQueue
<2*1024*1024> mPackets
;
298 /* Used for clock difference average computation */
299 seconds_d64 mClockDiffAvg
{0};
301 /* Time of the next sample to be buffered */
302 nanoseconds mCurrentPts
{0};
304 /* Device clock time that the stream started at. */
305 nanoseconds mDeviceStartTime
{nanoseconds::min()};
307 /* Decompressed sample frame, and swresample context for conversion */
308 AVFramePtr mDecodedFrame
;
309 SwrContextPtr mSwresCtx
;
311 /* Conversion format, for what gets fed to OpenAL */
312 uint64_t mDstChanLayout
{0};
313 AVSampleFormat mDstSampleFmt
{AV_SAMPLE_FMT_NONE
};
315 /* Storage of converted samples */
316 uint8_t *mSamples
{nullptr};
317 int mSamplesLen
{0}; /* In samples */
322 ALenum mFormat
{AL_NONE
};
323 ALuint mFrameSize
{0};
325 std::mutex mSrcMutex
;
326 std::condition_variable mSrcCond
;
327 std::atomic_flag mConnected
;
329 std::vector
<ALuint
> mBuffers
;
330 ALuint mBufferIdx
{0};
332 AudioState(MovieState
&movie
) : mMovie(movie
)
333 { mConnected
.test_and_set(std::memory_order_relaxed
); }
337 alDeleteSources(1, &mSource
);
338 if(!mBuffers
.empty())
339 alDeleteBuffers(static_cast<ALsizei
>(mBuffers
.size()), mBuffers
.data());
344 #ifdef AL_SOFT_events
345 static void AL_APIENTRY
EventCallback(ALenum eventType
, ALuint object
, ALuint param
,
346 ALsizei length
, const ALchar
*message
,
350 nanoseconds
getClockNoLock();
351 nanoseconds
getClock()
353 std::lock_guard
<std::mutex
> lock
{mSrcMutex
};
354 return getClockNoLock();
357 void startPlayback();
361 bool readAudio(uint8_t *samples
, unsigned int length
);
369 AVStream
*mStream
{nullptr};
370 AVCodecCtxPtr mCodecCtx
;
372 PacketQueue
<14*1024*1024> mPackets
;
374 /* The pts of the currently displayed frame, and the time (av_gettime) it
375 * was last updated - used to have running video pts
377 nanoseconds mDisplayPts
{0};
378 microseconds mDisplayPtsTime
{microseconds::min()};
379 std::mutex mDispPtsMutex
;
381 /* Swscale context for format conversion */
382 SwsContextPtr mSwscaleCtx
;
386 nanoseconds mPts
{nanoseconds::min()};
388 std::array
<Picture
,VIDEO_PICTURE_QUEUE_SIZE
> mPictQ
;
389 std::atomic
<size_t> mPictQRead
{0u}, mPictQWrite
{1u};
390 std::mutex mPictQMutex
;
391 std::condition_variable mPictQCond
;
393 SDL_Texture
*mImage
{nullptr};
394 int mWidth
{0}, mHeight
{0}; /* Logical image size (actual size may be larger) */
395 bool mFirstUpdate
{true};
397 std::atomic
<bool> mEOS
{false};
398 std::atomic
<bool> mFinalUpdate
{false};
400 VideoState(MovieState
&movie
) : mMovie(movie
) { }
404 SDL_DestroyTexture(mImage
);
408 nanoseconds
getClock();
410 void display(SDL_Window
*screen
, SDL_Renderer
*renderer
);
411 void updateVideo(SDL_Window
*screen
, SDL_Renderer
*renderer
, bool redraw
);
416 AVIOContextPtr mIOContext
;
417 AVFormatCtxPtr mFormatCtx
;
419 SyncMaster mAVSyncType
{SyncMaster::Default
};
421 microseconds mClockBase
{microseconds::min()};
423 std::atomic
<bool> mQuit
{false};
428 std::thread mParseThread
;
429 std::thread mAudioThread
;
430 std::thread mVideoThread
;
432 std::string mFilename
;
434 MovieState(std::string fname
)
435 : mAudio(*this), mVideo(*this), mFilename(std::move(fname
))
440 if(mParseThread
.joinable())
444 static int decode_interrupt_cb(void *ctx
);
446 void setTitle(SDL_Window
*window
);
448 nanoseconds
getClock();
450 nanoseconds
getMasterClock();
452 nanoseconds
getDuration();
454 int streamComponentOpen(unsigned int stream_index
);
459 nanoseconds
AudioState::getClockNoLock()
461 // The audio clock is the timestamp of the sample currently being heard.
462 if(alcGetInteger64vSOFT
)
464 // If device start time = min, we aren't playing yet.
465 if(mDeviceStartTime
== nanoseconds::min())
466 return nanoseconds::zero();
468 // Get the current device clock time and latency.
469 auto device
= alcGetContextsDevice(alcGetCurrentContext());
470 ALCint64SOFT devtimes
[2]{0,0};
471 alcGetInteger64vSOFT(device
, ALC_DEVICE_CLOCK_LATENCY_SOFT
, 2, devtimes
);
472 auto latency
= nanoseconds
{devtimes
[1]};
473 auto device_time
= nanoseconds
{devtimes
[0]};
475 // The clock is simply the current device time relative to the recorded
476 // start time. We can also subtract the latency to get more a accurate
477 // position of where the audio device actually is in the output stream.
478 return device_time
- mDeviceStartTime
- latency
;
481 /* The source-based clock is based on 4 components:
482 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
483 * 2 - The length of the source's buffer queue
484 * (AudioBufferTime*AL_BUFFERS_QUEUED)
485 * 3 - The offset OpenAL is currently at in the source (the first value
486 * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
487 * 4 - The latency between OpenAL and the DAC (the second value from
488 * AL_SAMPLE_OFFSET_LATENCY_SOFT)
490 * Subtracting the length of the source queue from the next sample's
491 * timestamp gives the timestamp of the sample at the start of the source
492 * queue. Adding the source offset to that results in the timestamp for the
493 * sample at OpenAL's current position, and subtracting the source latency
494 * from that gives the timestamp of the sample currently at the DAC.
496 nanoseconds pts
{mCurrentPts
};
499 ALint64SOFT offset
[2];
501 /* NOTE: The source state must be checked last, in case an underrun
502 * occurs and the source stops between retrieving the offset+latency
503 * and getting the state. */
504 if(alGetSourcei64vSOFT
)
505 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_LATENCY_SOFT
, offset
);
509 alGetSourcei(mSource
, AL_SAMPLE_OFFSET
, &ioffset
);
510 offset
[0] = ALint64SOFT
{ioffset
} << 32;
513 ALint queued
, status
;
514 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
515 alGetSourcei(mSource
, AL_SOURCE_STATE
, &status
);
517 /* If the source is AL_STOPPED, then there was an underrun and all
518 * buffers are processed, so ignore the source queue. The audio thread
519 * will put the source into an AL_INITIAL state and clear the queue
520 * when it starts recovery. */
521 if(status
!= AL_STOPPED
)
523 pts
-= AudioBufferTime
*queued
;
524 pts
+= std::chrono::duration_cast
<nanoseconds
>(
525 fixed32
{offset
[0] / mCodecCtx
->sample_rate
});
527 /* Don't offset by the latency if the source isn't playing. */
528 if(status
== AL_PLAYING
)
529 pts
-= nanoseconds
{offset
[1]};
532 return std::max(pts
, nanoseconds::zero());
535 void AudioState::startPlayback()
537 alSourcePlay(mSource
);
538 if(alcGetInteger64vSOFT
)
540 // Subtract the total buffer queue time from the current pts to get the
541 // pts of the start of the queue.
542 nanoseconds startpts
{mCurrentPts
- AudioBufferTotalTime
};
543 int64_t srctimes
[2]{0,0};
544 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_CLOCK_SOFT
, srctimes
);
545 auto device_time
= nanoseconds
{srctimes
[1]};
546 auto src_offset
= std::chrono::duration_cast
<nanoseconds
>(fixed32
{srctimes
[0]}) /
547 mCodecCtx
->sample_rate
;
549 // The mixer may have ticked and incremented the device time and sample
550 // offset, so subtract the source offset from the device time to get
551 // the device time the source started at. Also subtract startpts to get
552 // the device time the stream would have started at to reach where it
554 mDeviceStartTime
= device_time
- src_offset
- startpts
;
558 int AudioState::getSync()
560 if(mMovie
.mAVSyncType
== SyncMaster::Audio
)
563 auto ref_clock
= mMovie
.getMasterClock();
564 auto diff
= ref_clock
- getClockNoLock();
566 if(!(diff
< AVNoSyncThreshold
&& diff
> -AVNoSyncThreshold
))
568 /* Difference is TOO big; reset accumulated average */
569 mClockDiffAvg
= seconds_d64::zero();
573 /* Accumulate the diffs */
574 mClockDiffAvg
= mClockDiffAvg
*AudioAvgFilterCoeff
+ diff
;
575 auto avg_diff
= mClockDiffAvg
*(1.0 - AudioAvgFilterCoeff
);
576 if(avg_diff
< AudioSyncThreshold
/2.0 && avg_diff
> -AudioSyncThreshold
)
579 /* Constrain the per-update difference to avoid exceedingly large skips */
580 diff
= std::min
<nanoseconds
>(diff
, AudioSampleCorrectionMax
);
581 return static_cast<int>(std::chrono::duration_cast
<seconds
>(diff
*mCodecCtx
->sample_rate
).count());
584 int AudioState::decodeFrame()
586 while(!mMovie
.mQuit
.load(std::memory_order_relaxed
))
589 while((ret
=avcodec_receive_frame(mCodecCtx
.get(), mDecodedFrame
.get())) == AVERROR(EAGAIN
))
590 mPackets
.sendTo(mCodecCtx
.get());
593 if(ret
== AVERROR_EOF
) break;
594 std::cerr
<< "Failed to receive frame: "<<ret
<<std::endl
;
598 if(mDecodedFrame
->nb_samples
<= 0)
601 /* If provided, update w/ pts */
602 if(mDecodedFrame
->best_effort_timestamp
!= AV_NOPTS_VALUE
)
603 mCurrentPts
= std::chrono::duration_cast
<nanoseconds
>(
604 seconds_d64
{av_q2d(mStream
->time_base
)*mDecodedFrame
->best_effort_timestamp
}
607 if(mDecodedFrame
->nb_samples
> mSamplesMax
)
611 &mSamples
, nullptr, mCodecCtx
->channels
,
612 mDecodedFrame
->nb_samples
, mDstSampleFmt
, 0
614 mSamplesMax
= mDecodedFrame
->nb_samples
;
616 /* Return the amount of sample frames converted */
617 int data_size
{swr_convert(mSwresCtx
.get(), &mSamples
, mDecodedFrame
->nb_samples
,
618 const_cast<const uint8_t**>(mDecodedFrame
->data
), mDecodedFrame
->nb_samples
)};
620 av_frame_unref(mDecodedFrame
.get());
627 /* Duplicates the sample at in to out, count times. The frame size is a
628 * multiple of the template type size.
631 static void sample_dup(uint8_t *out
, const uint8_t *in
, unsigned int count
, size_t frame_size
)
633 auto *sample
= reinterpret_cast<const T
*>(in
);
634 auto *dst
= reinterpret_cast<T
*>(out
);
635 if(frame_size
== sizeof(T
))
636 std::fill_n(dst
, count
, *sample
);
639 /* NOTE: frame_size is a multiple of sizeof(T). */
640 size_t type_mult
{frame_size
/ sizeof(T
)};
642 std::generate_n(dst
, count
*type_mult
,
643 [sample
,type_mult
,&i
]() -> T
654 bool AudioState::readAudio(uint8_t *samples
, unsigned int length
)
656 int sample_skip
{getSync()};
657 unsigned int audio_size
{0};
659 /* Read the next chunk of data, refill the buffer, and queue it
661 length
/= mFrameSize
;
662 while(audio_size
< length
)
664 if(mSamplesLen
<= 0 || mSamplesPos
>= mSamplesLen
)
666 int frame_len
= decodeFrame();
667 if(frame_len
<= 0) break;
669 mSamplesLen
= frame_len
;
670 mSamplesPos
= std::min(mSamplesLen
, sample_skip
);
671 sample_skip
-= mSamplesPos
;
673 // Adjust the device start time and current pts by the amount we're
674 // skipping/duplicating, so that the clock remains correct for the
675 // current stream position.
676 auto skip
= nanoseconds
{seconds
{mSamplesPos
}} / mCodecCtx
->sample_rate
;
677 mDeviceStartTime
-= skip
;
682 unsigned int rem
{length
- audio_size
};
685 const auto len
= static_cast<unsigned int>(mSamplesLen
- mSamplesPos
);
686 if(rem
> len
) rem
= len
;
687 std::copy_n(mSamples
+ static_cast<unsigned int>(mSamplesPos
)*mFrameSize
,
688 rem
*mFrameSize
, samples
);
692 rem
= std::min(rem
, static_cast<unsigned int>(-mSamplesPos
));
694 /* Add samples by copying the first sample */
695 if((mFrameSize
&7) == 0)
696 sample_dup
<uint64_t>(samples
, mSamples
, rem
, mFrameSize
);
697 else if((mFrameSize
&3) == 0)
698 sample_dup
<uint32_t>(samples
, mSamples
, rem
, mFrameSize
);
699 else if((mFrameSize
&1) == 0)
700 sample_dup
<uint16_t>(samples
, mSamples
, rem
, mFrameSize
);
702 sample_dup
<uint8_t>(samples
, mSamples
, rem
, mFrameSize
);
706 mCurrentPts
+= nanoseconds
{seconds
{rem
}} / mCodecCtx
->sample_rate
;
707 samples
+= rem
*mFrameSize
;
713 if(audio_size
< length
)
715 const unsigned int rem
{length
- audio_size
};
716 std::fill_n(samples
, rem
*mFrameSize
,
717 (mDstSampleFmt
== AV_SAMPLE_FMT_U8
) ? 0x80 : 0x00);
718 mCurrentPts
+= nanoseconds
{seconds
{rem
}} / mCodecCtx
->sample_rate
;
725 #ifdef AL_SOFT_events
726 void AL_APIENTRY
AudioState::EventCallback(ALenum eventType
, ALuint object
, ALuint param
,
727 ALsizei length
, const ALchar
*message
,
730 auto self
= static_cast<AudioState
*>(userParam
);
732 if(eventType
== AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT
)
734 /* Temporarily lock the source mutex to ensure it's not between
735 * checking the processed count and going to sleep.
737 std::unique_lock
<std::mutex
>{self
->mSrcMutex
}.unlock();
738 self
->mSrcCond
.notify_one();
742 std::cout
<< "\n---- AL Event on AudioState "<<self
<<" ----\nEvent: ";
745 case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT
: std::cout
<< "Buffer completed"; break;
746 case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT
: std::cout
<< "Source state changed"; break;
747 case AL_EVENT_TYPE_ERROR_SOFT
: std::cout
<< "API error"; break;
748 case AL_EVENT_TYPE_PERFORMANCE_SOFT
: std::cout
<< "Performance"; break;
749 case AL_EVENT_TYPE_DEPRECATED_SOFT
: std::cout
<< "Deprecated"; break;
750 case AL_EVENT_TYPE_DISCONNECTED_SOFT
: std::cout
<< "Disconnected"; break;
751 default: std::cout
<< "0x"<<std::hex
<<std::setw(4)<<std::setfill('0')<<eventType
<<
752 std::dec
<<std::setw(0)<<std::setfill(' '); break;
755 "Object ID: "<<object
<<"\n"
756 "Parameter: "<<param
<<"\n"
757 "Message: "<<std::string
{message
, static_cast<ALuint
>(length
)}<<"\n----"<<
760 if(eventType
== AL_EVENT_TYPE_DISCONNECTED_SOFT
)
763 std::lock_guard
<std::mutex
> lock
{self
->mSrcMutex
};
764 self
->mConnected
.clear(std::memory_order_release
);
766 self
->mSrcCond
.notify_one();
771 int AudioState::handler()
773 std::unique_lock
<std::mutex
> srclock
{mSrcMutex
, std::defer_lock
};
774 milliseconds sleep_time
{AudioBufferTime
/ 3};
777 #ifdef AL_SOFT_events
778 const std::array
<ALenum
,6> evt_types
{{
779 AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT
, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT
,
780 AL_EVENT_TYPE_ERROR_SOFT
, AL_EVENT_TYPE_PERFORMANCE_SOFT
, AL_EVENT_TYPE_DEPRECATED_SOFT
,
781 AL_EVENT_TYPE_DISCONNECTED_SOFT
783 if(alEventControlSOFT
)
785 alEventControlSOFT(evt_types
.size(), evt_types
.data(), AL_TRUE
);
786 alEventCallbackSOFT(EventCallback
, this);
787 sleep_time
= AudioBufferTotalTime
;
790 #ifdef AL_SOFT_bformat_ex
791 const bool has_bfmt_ex
{alIsExtensionPresent("AL_SOFTX_bformat_ex") != AL_FALSE
};
792 ALenum ambi_layout
{AL_FUMA_SOFT
};
793 ALenum ambi_scale
{AL_FUMA_SOFT
};
796 /* Find a suitable format for OpenAL. */
799 if((mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_FLT
|| mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_FLTP
) &&
800 alIsExtensionPresent("AL_EXT_FLOAT32"))
802 mDstSampleFmt
= AV_SAMPLE_FMT_FLT
;
804 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_7POINT1
&&
805 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
806 (fmt
=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE
&& fmt
!= -1)
808 mDstChanLayout
= mCodecCtx
->channel_layout
;
812 if((mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1
||
813 mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1_BACK
) &&
814 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
815 (fmt
=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE
&& fmt
!= -1)
817 mDstChanLayout
= mCodecCtx
->channel_layout
;
821 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_MONO
)
823 mDstChanLayout
= mCodecCtx
->channel_layout
;
825 mFormat
= AL_FORMAT_MONO_FLOAT32
;
827 /* Assume 3D B-Format (ambisonics) if the channel layout is blank and
828 * there's 4 or more channels. FFmpeg/libavcodec otherwise seems to
829 * have no way to specify if the source is actually B-Format (let alone
832 if(mCodecCtx
->channel_layout
== 0 && mCodecCtx
->channels
>= 4 &&
833 alIsExtensionPresent("AL_EXT_BFORMAT") &&
834 (fmt
=alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32")) != AL_NONE
&& fmt
!= -1)
836 int order
{static_cast<int>(std::sqrt(mCodecCtx
->channels
)) - 1};
837 if((order
+1)*(order
+1) == mCodecCtx
->channels
||
838 (order
+1)*(order
+1) + 2 == mCodecCtx
->channels
)
840 /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
841 * is 4 channels for 3D buffers.
849 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
851 mFormat
= AL_FORMAT_STEREO_FLOAT32
;
854 if(mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_U8
|| mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_U8P
)
856 mDstSampleFmt
= AV_SAMPLE_FMT_U8
;
858 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_7POINT1
&&
859 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
860 (fmt
=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE
&& fmt
!= -1)
862 mDstChanLayout
= mCodecCtx
->channel_layout
;
866 if((mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1
||
867 mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1_BACK
) &&
868 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
869 (fmt
=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE
&& fmt
!= -1)
871 mDstChanLayout
= mCodecCtx
->channel_layout
;
875 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_MONO
)
877 mDstChanLayout
= mCodecCtx
->channel_layout
;
879 mFormat
= AL_FORMAT_MONO8
;
881 if(mCodecCtx
->channel_layout
== 0 && mCodecCtx
->channels
>= 4 &&
882 alIsExtensionPresent("AL_EXT_BFORMAT") &&
883 (fmt
=alGetEnumValue("AL_FORMAT_BFORMAT3D8")) != AL_NONE
&& fmt
!= -1)
885 int order
{static_cast<int>(std::sqrt(mCodecCtx
->channels
)) - 1};
886 if((order
+1)*(order
+1) == mCodecCtx
->channels
||
887 (order
+1)*(order
+1) + 2 == mCodecCtx
->channels
)
895 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
897 mFormat
= AL_FORMAT_STEREO8
;
902 mDstSampleFmt
= AV_SAMPLE_FMT_S16
;
904 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_7POINT1
&&
905 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
906 (fmt
=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE
&& fmt
!= -1)
908 mDstChanLayout
= mCodecCtx
->channel_layout
;
912 if((mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1
||
913 mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1_BACK
) &&
914 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
915 (fmt
=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE
&& fmt
!= -1)
917 mDstChanLayout
= mCodecCtx
->channel_layout
;
921 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_MONO
)
923 mDstChanLayout
= mCodecCtx
->channel_layout
;
925 mFormat
= AL_FORMAT_MONO16
;
927 if(mCodecCtx
->channel_layout
== 0 && mCodecCtx
->channels
>= 4 &&
928 alIsExtensionPresent("AL_EXT_BFORMAT") &&
929 (fmt
=alGetEnumValue("AL_FORMAT_BFORMAT3D16")) != AL_NONE
&& fmt
!= -1)
931 int order
{static_cast<int>(std::sqrt(mCodecCtx
->channels
)) - 1};
932 if((order
+1)*(order
+1) == mCodecCtx
->channels
||
933 (order
+1)*(order
+1) + 2 == mCodecCtx
->channels
)
941 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
943 mFormat
= AL_FORMAT_STEREO16
;
946 void *samples
{nullptr};
947 ALsizei buffer_len
= static_cast<int>(std::chrono::duration_cast
<seconds
>(
948 mCodecCtx
->sample_rate
* AudioBufferTime
).count() * mFrameSize
);
955 mDecodedFrame
.reset(av_frame_alloc());
958 std::cerr
<< "Failed to allocate audio frame" <<std::endl
;
964 /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
965 * we have to drop any extra channels.
967 mSwresCtx
.reset(swr_alloc_set_opts(nullptr,
968 (1_i64
<<4)-1, mDstSampleFmt
, mCodecCtx
->sample_rate
,
969 (1_i64
<<mCodecCtx
->channels
)-1, mCodecCtx
->sample_fmt
, mCodecCtx
->sample_rate
,
972 /* Note that ffmpeg/libavcodec has no method to check the ambisonic
973 * channel order and normalization, so we can only assume AmbiX as the
974 * defacto-standard. This is not true for .amb files, which use FuMa.
976 #ifdef AL_SOFT_bformat_ex
977 ambi_layout
= AL_ACN_SOFT
;
978 ambi_scale
= AL_SN3D_SOFT
;
982 /* Without AL_SOFT_bformat_ex, OpenAL only supports FuMa channel
983 * ordering and normalization, so a custom matrix is needed to
984 * scale and reorder the source from AmbiX.
986 std::vector
<double> mtx(64*64, 0.0);
987 mtx
[0 + 0*64] = std::sqrt(0.5);
991 swr_set_matrix(mSwresCtx
.get(), mtx
.data(), 64);
995 mSwresCtx
.reset(swr_alloc_set_opts(nullptr,
996 static_cast<int64_t>(mDstChanLayout
), mDstSampleFmt
, mCodecCtx
->sample_rate
,
997 mCodecCtx
->channel_layout
? static_cast<int64_t>(mCodecCtx
->channel_layout
) :
998 av_get_default_channel_layout(mCodecCtx
->channels
),
999 mCodecCtx
->sample_fmt
, mCodecCtx
->sample_rate
,
1001 if(!mSwresCtx
|| swr_init(mSwresCtx
.get()) != 0)
1003 std::cerr
<< "Failed to initialize audio converter" <<std::endl
;
1007 mBuffers
.assign(AudioBufferTotalTime
/ AudioBufferTime
, 0);
1008 alGenBuffers(static_cast<ALsizei
>(mBuffers
.size()), mBuffers
.data());
1009 alGenSources(1, &mSource
);
1012 alSourcei(mSource
, AL_DIRECT_CHANNELS_SOFT
, AL_TRUE
);
1013 if (EnableWideStereo
) {
1014 ALfloat angles
[2] = {static_cast<ALfloat
>(M_PI
/ 3.0),
1015 static_cast<ALfloat
>(-M_PI
/ 3.0)};
1016 alSourcefv(mSource
, AL_STEREO_ANGLES
, angles
);
1019 if(alGetError() != AL_NO_ERROR
)
1022 #ifdef AL_SOFT_bformat_ex
1025 for(ALuint bufid
: mBuffers
)
1027 alBufferi(bufid
, AL_AMBISONIC_LAYOUT_SOFT
, ambi_layout
);
1028 alBufferi(bufid
, AL_AMBISONIC_SCALING_SOFT
, ambi_scale
);
1032 #ifdef AL_SOFT_map_buffer
1033 if(alBufferStorageSOFT
)
1035 for(ALuint bufid
: mBuffers
)
1036 alBufferStorageSOFT(bufid
, mFormat
, nullptr, buffer_len
, mCodecCtx
->sample_rate
,
1037 AL_MAP_WRITE_BIT_SOFT
);
1038 if(alGetError() != AL_NO_ERROR
)
1040 fprintf(stderr
, "Failed to use mapped buffers\n");
1041 samples
= av_malloc(static_cast<ALuint
>(buffer_len
));
1046 samples
= av_malloc(static_cast<ALuint
>(buffer_len
));
1048 /* Prefill the codec buffer. */
1050 const int ret
{mPackets
.sendTo(mCodecCtx
.get())};
1051 if(ret
== AVERROR(EAGAIN
) || ret
== AVERROR_EOF
)
1056 if(alcGetInteger64vSOFT
)
1059 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT
,
1061 mDeviceStartTime
= nanoseconds
{devtime
} - mCurrentPts
;
1063 while(alGetError() == AL_NO_ERROR
&& !mMovie
.mQuit
.load(std::memory_order_relaxed
) &&
1064 mConnected
.test_and_set(std::memory_order_relaxed
))
1066 /* First remove any processed buffers. */
1068 alGetSourcei(mSource
, AL_BUFFERS_PROCESSED
, &processed
);
1069 while(processed
> 0)
1071 std::array
<ALuint
,4> bids
;
1072 const ALsizei todq
{std::min
<ALsizei
>(bids
.size(), processed
)};
1073 alSourceUnqueueBuffers(mSource
, todq
, bids
.data());
1077 /* Refill the buffer queue. */
1079 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
1080 while(static_cast<ALuint
>(queued
) < mBuffers
.size())
1082 const ALuint bufid
{mBuffers
[mBufferIdx
]};
1083 /* Read the next chunk of data, filling the buffer, and queue it on
1086 #ifdef AL_SOFT_map_buffer
1089 auto ptr
= static_cast<uint8_t*>(alMapBufferSOFT(bufid
, 0, buffer_len
,
1090 AL_MAP_WRITE_BIT_SOFT
));
1091 bool got_audio
{readAudio(ptr
, static_cast<unsigned int>(buffer_len
))};
1092 alUnmapBufferSOFT(bufid
);
1093 if(!got_audio
) break;
1098 auto ptr
= static_cast<uint8_t*>(samples
);
1099 if(!readAudio(ptr
, static_cast<unsigned int>(buffer_len
)))
1101 alBufferData(bufid
, mFormat
, samples
, buffer_len
, mCodecCtx
->sample_rate
);
1104 alSourceQueueBuffers(mSource
, 1, &bufid
);
1105 mBufferIdx
= (mBufferIdx
+1) % mBuffers
.size();
1111 /* Check that the source is playing. */
1113 alGetSourcei(mSource
, AL_SOURCE_STATE
, &state
);
1114 if(state
== AL_STOPPED
)
1116 /* AL_STOPPED means there was an underrun. Clear the buffer queue
1117 * since this likely means we're late, and rewind the source to get
1118 * it back into an AL_INITIAL state.
1120 alSourceRewind(mSource
);
1121 alSourcei(mSource
, AL_BUFFER
, 0);
1122 if(alcGetInteger64vSOFT
)
1124 /* Also update the device start time with the current device
1125 * clock, so the decoder knows we're running behind.
1128 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
1129 ALC_DEVICE_CLOCK_SOFT
, 1, &devtime
);
1130 mDeviceStartTime
= nanoseconds
{devtime
} - mCurrentPts
;
1135 /* (re)start the source if needed, and wait for a buffer to finish */
1136 if(state
!= AL_PLAYING
&& state
!= AL_PAUSED
)
1139 mSrcCond
.wait_for(srclock
, sleep_time
);
1142 alSourceRewind(mSource
);
1143 alSourcei(mSource
, AL_BUFFER
, 0);
1149 #ifdef AL_SOFT_events
1150 if(alEventControlSOFT
)
1152 alEventControlSOFT(evt_types
.size(), evt_types
.data(), AL_FALSE
);
1153 alEventCallbackSOFT(nullptr, nullptr);
1161 nanoseconds
VideoState::getClock()
1163 /* NOTE: This returns incorrect times while not playing. */
1164 std::lock_guard
<std::mutex
> _
{mDispPtsMutex
};
1165 if(mDisplayPtsTime
== microseconds::min())
1166 return nanoseconds::zero();
1167 auto delta
= get_avtime() - mDisplayPtsTime
;
1168 return mDisplayPts
+ delta
;
/* Called by VideoState::updateVideo to display the next video frame. */
void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
{
    if(!mImage)
        return;

    double aspect_ratio;
    int win_w, win_h;
    int w, h, x, y;

    /* Derive the display aspect ratio from the stream's sample aspect ratio
     * (pixel shape) scaled by the frame dimensions. A num of 0 means the SAR
     * is unknown.
     */
    if(mCodecCtx->sample_aspect_ratio.num == 0)
        aspect_ratio = 0.0;
    else
    {
        aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
                       mCodecCtx->height;
    }
    /* Fall back to the plain width/height ratio if the SAR was unusable. */
    if(aspect_ratio <= 0.0)
        aspect_ratio = static_cast<double>(mCodecCtx->width) / mCodecCtx->height;

    /* Letterbox: fill the window height, then shrink to fit the width if
     * needed. The +3 & ~3 rounds the size up to a multiple of 4.
     */
    SDL_GetWindowSize(screen, &win_w, &win_h);
    h = win_h;
    w = (static_cast<int>(std::rint(h * aspect_ratio)) + 3) & ~3;
    if(w > win_w)
    {
        w = win_w;
        h = (static_cast<int>(std::rint(w / aspect_ratio)) + 3) & ~3;
    }
    /* Center the scaled image in the window. */
    x = (win_w - w) / 2;
    y = (win_h - h) / 2;

    SDL_Rect src_rect{ 0, 0, mWidth, mHeight };
    SDL_Rect dst_rect{ x, y, w, h };
    SDL_RenderCopy(renderer, mImage, &src_rect, &dst_rect);
    SDL_RenderPresent(renderer);
}
/* Called regularly on the main thread where the SDL_Renderer was created. It
 * handles updating the textures of decoded frames and displaying the latest
 * frame.
 */
void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw)
{
    size_t read_idx{mPictQRead.load(std::memory_order_relaxed)};
    Picture *vp{&mPictQ[read_idx]};

    auto clocktime = mMovie.getMasterClock();
    bool updated{false};
    /* Walk forward through the picture queue, dropping any frames whose PTS
     * is already behind the master clock; stop at the newest frame that is
     * due (or when the queue runs out).
     */
    while(1)
    {
        size_t next_idx{(read_idx+1)%mPictQ.size()};
        if(next_idx == mPictQWrite.load(std::memory_order_acquire))
            break;
        Picture *nextvp{&mPictQ[next_idx]};
        if(clocktime < nextvp->mPts)
            break;

        vp = nextvp;
        updated = true;
        read_idx = next_idx;
    }
    if(mMovie.mQuit.load(std::memory_order_relaxed))
    {
        /* Quitting: don't display anything, just release the queue slot and
         * wake the decoder so it can finish up.
         */
        mFinalUpdate = true;
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();
        return;
    }

    if(updated)
    {
        /* Publish the new read position and wake the decoder thread, which
         * may be waiting for a free slot.
         */
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();

        /* allocate or resize the buffer! */
        bool fmt_updated{false};
        if(!mImage || mWidth != mCodecCtx->width || mHeight != mCodecCtx->height)
        {
            fmt_updated = true;
            if(mImage)
                SDL_DestroyTexture(mImage);
            /* The texture uses the codec's coded (padded) size; only the
             * mWidth x mHeight region is actually presented.
             */
            mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                mCodecCtx->coded_width, mCodecCtx->coded_height);
            if(!mImage)
                std::cerr<< "Failed to create YV12 texture!" <<std::endl;
            mWidth = mCodecCtx->width;
            mHeight = mCodecCtx->height;
        }

        if(mFirstUpdate && mWidth > 0 && mHeight > 0)
        {
            /* For the first update, set the window size to the video size. */
            mFirstUpdate = false;

            int w{mWidth};
            int h{mHeight};
            /* Stretch the window to account for non-square pixels, if the
             * stream reports a sample aspect ratio.
             */
            if(mCodecCtx->sample_aspect_ratio.den != 0)
            {
                double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    w = static_cast<int>(w*aspect_ratio + 0.5);
                else if(aspect_ratio > 0.0)
                    h = static_cast<int>(h/aspect_ratio + 0.5);
            }
            SDL_SetWindowSize(screen, w, h);
        }

        if(mImage)
        {
            AVFrame *frame{vp->mFrame.get()};
            void *pixels{nullptr};
            int pitch{0};

            /* YUV420P frames can be uploaded directly; anything else is
             * converted into the locked texture via swscale below.
             */
            if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
                SDL_UpdateYUVTexture(mImage, nullptr,
                    frame->data[0], frame->linesize[0],
                    frame->data[1], frame->linesize[1],
                    frame->data[2], frame->linesize[2]
                );
            else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0)
                std::cerr<< "Failed to lock texture" <<std::endl;
            else
            {
                // Convert the image into YUV format that SDL uses
                int coded_w{mCodecCtx->coded_width};
                int coded_h{mCodecCtx->coded_height};
                int w{mCodecCtx->width};
                int h{mCodecCtx->height};
                /* (Re)create the scaler when the format or size changed. */
                if(!mSwscaleCtx || fmt_updated)
                {
                    mSwscaleCtx.reset(sws_getContext(
                        w, h, mCodecCtx->pix_fmt,
                        w, h, AV_PIX_FMT_YUV420P, 0,
                        nullptr, nullptr, nullptr
                    ));
                }

                /* point pict at the queue */
                /* The three planes live back-to-back in the locked texture:
                 * full-size Y, then quarter-size U and V.
                 */
                uint8_t *pict_data[3];
                pict_data[0] = static_cast<uint8_t*>(pixels);
                pict_data[1] = pict_data[0] + coded_w*coded_h;
                pict_data[2] = pict_data[1] + coded_w*coded_h/4;

                int pict_linesize[3];
                pict_linesize[0] = pitch;
                pict_linesize[1] = pitch / 2;
                pict_linesize[2] = pitch / 2;

                sws_scale(mSwscaleCtx.get(), reinterpret_cast<uint8_t**>(frame->data), frame->linesize,
                    0, h, pict_data, pict_linesize);
                SDL_UnlockTexture(mImage);
            }
        }

        redraw = true;
    }

    if(redraw)
    {
        /* Show the picture! */
        display(screen, renderer);
    }

    if(updated)
    {
        /* Record when (and which PTS) we displayed, for getClock(). */
        auto disp_time = get_avtime();

        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPts = vp->mPts;
        mDisplayPtsTime = disp_time;
    }
    /* If the decoder hit end-of-stream and the queue is drained, mark the
     * final update and wake anyone waiting on it.
     */
    if(mEOS.load(std::memory_order_acquire))
    {
        if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire))
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>{mPictQMutex}.unlock();
            mPictQCond.notify_one();
        }
    }
}
/* Video decoding thread: pulls packets from the packet queue, decodes frames,
 * timestamps them, and places them into the picture queue for the main thread
 * to upload and display.
 */
int VideoState::handler()
{
    /* Pre-allocate one AVFrame per picture-queue slot. */
    std::for_each(mPictQ.begin(), mPictQ.end(),
        [](Picture &pict) -> void
        { pict.mFrame = AVFramePtr{av_frame_alloc()}; });

    /* Prefill the codec buffer. */
    do {
        const int ret{mPackets.sendTo(mCodecCtx.get())};
        if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
    } while(1);

    {
        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPtsTime = get_avtime();
    }

    auto current_pts = nanoseconds::zero();
    while(!mMovie.mQuit.load(std::memory_order_relaxed))
    {
        size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)};
        Picture *vp{&mPictQ[write_idx]};

        /* Retrieve video frame. */
        AVFrame *decoded_frame{vp->mFrame.get()};
        int ret;
        /* Keep feeding packets until the decoder produces a frame. */
        while((ret=avcodec_receive_frame(mCodecCtx.get(), decoded_frame)) == AVERROR(EAGAIN))
            mPackets.sendTo(mCodecCtx.get());
        if(ret != 0)
        {
            if(ret == AVERROR_EOF) break;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
            continue;
        }

        /* Get the PTS for this frame. */
        if(decoded_frame->best_effort_timestamp != AV_NOPTS_VALUE)
            current_pts = std::chrono::duration_cast<nanoseconds>(
                seconds_d64{av_q2d(mStream->time_base)*decoded_frame->best_effort_timestamp});
        vp->mPts = current_pts;

        /* Update the video clock to the next expected PTS. */
        /* repeat_pict extends the frame's duration by half-frame units. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
        current_pts += std::chrono::duration_cast<nanoseconds>(seconds_d64{frame_delay});

        /* Put the frame in the queue to be loaded into a texture and displayed
         * by the rendering thread.
         */
        write_idx = (write_idx+1)%mPictQ.size();
        mPictQWrite.store(write_idx, std::memory_order_release);

        /* Send a packet now so it's hopefully ready by the time it's needed. */
        mPackets.sendTo(mCodecCtx.get());

        if(write_idx == mPictQRead.load(std::memory_order_acquire))
        {
            /* Wait until we have space for a new pic */
            std::unique_lock<std::mutex> lock{mPictQMutex};
            while(write_idx == mPictQRead.load(std::memory_order_acquire) &&
                !mMovie.mQuit.load(std::memory_order_relaxed))
                mPictQCond.wait(lock);
        }
    }
    /* Signal end-of-stream, then wait for the renderer to consume the last
     * queued frames before tearing down.
     */
    mEOS = true;

    std::unique_lock<std::mutex> lock{mPictQMutex};
    while(!mFinalUpdate) mPictQCond.wait(lock);

    return 0;
}
1430 int MovieState::decode_interrupt_cb(void *ctx
)
1432 return static_cast<MovieState
*>(ctx
)->mQuit
.load(std::memory_order_relaxed
);
/* Opens the movie file and its demuxer, then launches the parse thread.
 * Returns false (after logging) if any stage fails.
 */
bool MovieState::prepare()
{
    AVIOContext *avioctx{nullptr};
    /* Install the interrupt callback so blocking I/O can be aborted on quit. */
    AVIOInterruptCB intcb{decode_interrupt_cb, this};
    if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mIOContext.reset(avioctx);

    /* Open movie file. If avformat_open_input fails it will automatically free
     * this context, so don't set it onto a smart pointer yet.
     */
    AVFormatContext *fmtctx{avformat_alloc_context()};
    fmtctx->pb = mIOContext.get();
    fmtctx->interrupt_callback = intcb;
    if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mFormatCtx.reset(fmtctx);

    /* Retrieve stream information */
    if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
    {
        std::cerr<< mFilename <<": failed to find stream info" <<std::endl;
        return false;
    }

    /* Hand off demuxing/dispatch to a dedicated thread. */
    mParseThread = std::thread{std::mem_fn(&MovieState::parse_handler), this};
    return true;
}
1470 void MovieState::setTitle(SDL_Window
*window
)
1472 auto pos1
= mFilename
.rfind('/');
1473 auto pos2
= mFilename
.rfind('\\');
1474 auto fpos
= ((pos1
== std::string::npos
) ? pos2
:
1475 (pos2
== std::string::npos
) ? pos1
:
1476 std::max(pos1
, pos2
)) + 1;
1477 SDL_SetWindowTitle(window
, (mFilename
.substr(fpos
)+" - "+AppName
).c_str());
1480 nanoseconds
MovieState::getClock()
1482 if(mClockBase
== microseconds::min())
1483 return nanoseconds::zero();
1484 return get_avtime() - mClockBase
;
1487 nanoseconds
MovieState::getMasterClock()
1489 if(mAVSyncType
== SyncMaster::Video
)
1490 return mVideo
.getClock();
1491 if(mAVSyncType
== SyncMaster::Audio
)
1492 return mAudio
.getClock();
1496 nanoseconds
MovieState::getDuration()
1497 { return std::chrono::duration
<int64_t,std::ratio
<1,AV_TIME_BASE
>>(mFormatCtx
->duration
); }
/* Opens a decoder for the given stream and attaches it to the matching
 * audio/video component. Returns the stream index on success, -1 on failure
 * or for unsupported media types.
 */
int MovieState::streamComponentOpen(unsigned int stream_index)
{
    if(stream_index >= mFormatCtx->nb_streams)
        return -1;

    /* Get a pointer to the codec context for the stream, and open the
     * associated codec.
     */
    AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)};
    if(!avctx) return -1;

    if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
        return -1;

    AVCodec *codec{avcodec_find_decoder(avctx->codec_id)};
    if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
    {
        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
            << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        return -1;
    }

    /* Initialize and start the media type handler */
    switch(avctx->codec_type)
    {
        case AVMEDIA_TYPE_AUDIO:
            mAudio.mStream = mFormatCtx->streams[stream_index];
            mAudio.mCodecCtx = std::move(avctx);
            break;

        case AVMEDIA_TYPE_VIDEO:
            mVideo.mStream = mFormatCtx->streams[stream_index];
            mVideo.mCodecCtx = std::move(avctx);
            break;

        default:
            /* Subtitles, data streams, etc. are not handled. */
            return -1;
    }

    return static_cast<int>(stream_index);
}
/* Demuxer thread: opens the A/V streams, spawns the audio and video decoder
 * threads, then reads packets from the container and dispatches each one to
 * the queue of the stream it belongs to. Posts FF_MOVIE_DONE_EVENT when done.
 */
int MovieState::parse_handler()
{
    auto &audio_queue = mAudio.mPackets;
    auto &video_queue = mVideo.mPackets;

    int video_index{-1};
    int audio_index{-1};

    /* Dump information about file onto standard error */
    av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);

    /* Find the first video and audio streams */
    for(unsigned int i{0u};i < mFormatCtx->nb_streams;i++)
    {
        auto codecpar = mFormatCtx->streams[i]->codecpar;
        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0)
            video_index = streamComponentOpen(i);
        else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = streamComponentOpen(i);
    }

    if(video_index < 0 && audio_index < 0)
    {
        std::cerr<< mFilename <<": could not open codecs" <<std::endl;
        mQuit = true;
    }

    /* Set the base time 750ms ahead of the current av time. */
    mClockBase = get_avtime() + milliseconds{750};

    if(audio_index >= 0)
        mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio};
    if(video_index >= 0)
        mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo};

    /* Main packet reading/dispatching loop */
    while(!mQuit.load(std::memory_order_relaxed))
    {
        AVPacket packet;
        if(av_read_frame(mFormatCtx.get(), &packet) < 0)
            break;

        /* Copy the packet into the queue it's meant for. If the queue is
         * full, back off and retry until it accepts the packet or we quit.
         */
        if(packet.stream_index == video_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(&packet))
                std::this_thread::sleep_for(milliseconds{100});
        }
        else if(packet.stream_index == audio_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(&packet))
                std::this_thread::sleep_for(milliseconds{100});
        }

        av_packet_unref(&packet);
    }
    /* Finish the queues so the receivers know nothing more is coming. */
    if(mVideo.mCodecCtx) video_queue.setFinished();
    if(mAudio.mCodecCtx) audio_queue.setFinished();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    /* Wait for the renderer to finish displaying the last queued frames. */
    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock{mVideo.mPictQMutex};
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    /* Tell the main loop this movie is finished. */
    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}
1621 // Helper class+method to print the time with human-readable formatting.
/* Formats a PrettyTime as "[-][Hh]MMmSSs", zero-padding minutes only when an
 * hour count is printed, and always zero-padding seconds to two digits.
 */
std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;
    using std::chrono::duration_cast;

    seconds t{rhs.mTime};
    /* Print negative durations as a leading '-' plus the absolute value. */
    if(t.count() < 0)
    {
        os << '-';
        t *= -1;
    }

    // Only handle up to hour formatting
    if(t >= hours{1})
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
           << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    /* Restore the stream's default fill/width after padding the seconds. */
    os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
       << std::setfill(' ');
    return os;
}
1652 int main(int argc
, char *argv
[])
1654 std::unique_ptr
<MovieState
> movState
;
1658 std::cerr
<< "Usage: "<<argv
[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl
;
1661 /* Register all formats and codecs */
1662 #if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100))
1665 /* Initialize networking protocols */
1666 avformat_network_init();
1668 if(SDL_Init(SDL_INIT_VIDEO
| SDL_INIT_EVENTS
))
1670 std::cerr
<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl
;
1674 /* Make a window to put our video */
1675 SDL_Window
*screen
{SDL_CreateWindow(AppName
.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE
)};
1678 std::cerr
<< "SDL: could not set video mode - exiting" <<std::endl
;
1681 /* Make a renderer to handle the texture image surface and rendering. */
1682 Uint32 render_flags
{SDL_RENDERER_ACCELERATED
| SDL_RENDERER_PRESENTVSYNC
};
1683 SDL_Renderer
*renderer
{SDL_CreateRenderer(screen
, -1, render_flags
)};
1686 SDL_RendererInfo rinf
{};
1689 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1690 * software renderer. */
1691 if(SDL_GetRendererInfo(renderer
, &rinf
) == 0)
1693 for(Uint32 i
{0u};!ok
&& i
< rinf
.num_texture_formats
;i
++)
1694 ok
= (rinf
.texture_formats
[i
] == SDL_PIXELFORMAT_IYUV
);
1698 std::cerr
<< "IYUV pixelformat textures not supported on renderer "<<rinf
.name
<<std::endl
;
1699 SDL_DestroyRenderer(renderer
);
1705 render_flags
= SDL_RENDERER_SOFTWARE
| SDL_RENDERER_PRESENTVSYNC
;
1706 renderer
= SDL_CreateRenderer(screen
, -1, render_flags
);
1710 std::cerr
<< "SDL: could not create renderer - exiting" <<std::endl
;
1713 SDL_SetRenderDrawColor(renderer
, 0, 0, 0, 255);
1714 SDL_RenderFillRect(renderer
, nullptr);
1715 SDL_RenderPresent(renderer
);
1717 /* Open an audio device */
1719 if(InitAL(&argv
, &argc
))
1721 std::cerr
<< "Failed to set up audio device" <<std::endl
;
1726 auto device
= alcGetContextsDevice(alcGetCurrentContext());
1727 if(alcIsExtensionPresent(device
, "ALC_SOFT_device_clock"))
1729 std::cout
<< "Found ALC_SOFT_device_clock" <<std::endl
;
1730 alcGetInteger64vSOFT
= reinterpret_cast<LPALCGETINTEGER64VSOFT
>(
1731 alcGetProcAddress(device
, "alcGetInteger64vSOFT")
1736 if(alIsExtensionPresent("AL_SOFT_source_latency"))
1738 std::cout
<< "Found AL_SOFT_source_latency" <<std::endl
;
1739 alGetSourcei64vSOFT
= reinterpret_cast<LPALGETSOURCEI64VSOFT
>(
1740 alGetProcAddress("alGetSourcei64vSOFT")
1743 #ifdef AL_SOFT_map_buffer
1744 if(alIsExtensionPresent("AL_SOFTX_map_buffer"))
1746 std::cout
<< "Found AL_SOFT_map_buffer" <<std::endl
;
1747 alBufferStorageSOFT
= reinterpret_cast<LPALBUFFERSTORAGESOFT
>(
1748 alGetProcAddress("alBufferStorageSOFT"));
1749 alMapBufferSOFT
= reinterpret_cast<LPALMAPBUFFERSOFT
>(
1750 alGetProcAddress("alMapBufferSOFT"));
1751 alUnmapBufferSOFT
= reinterpret_cast<LPALUNMAPBUFFERSOFT
>(
1752 alGetProcAddress("alUnmapBufferSOFT"));
1755 #ifdef AL_SOFT_events
1756 if(alIsExtensionPresent("AL_SOFTX_events"))
1758 std::cout
<< "Found AL_SOFT_events" <<std::endl
;
1759 alEventControlSOFT
= reinterpret_cast<LPALEVENTCONTROLSOFT
>(
1760 alGetProcAddress("alEventControlSOFT"));
1761 alEventCallbackSOFT
= reinterpret_cast<LPALEVENTCALLBACKSOFT
>(
1762 alGetProcAddress("alEventCallbackSOFT"));
1767 for(;fileidx
< argc
;++fileidx
)
1769 if(strcmp(argv
[fileidx
], "-direct") == 0)
1771 if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
1772 std::cerr
<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl
;
1775 std::cout
<< "Found AL_SOFT_direct_channels" <<std::endl
;
1776 EnableDirectOut
= true;
1779 else if(strcmp(argv
[fileidx
], "-wide") == 0)
1781 if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
1782 std::cerr
<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl
;
1785 std::cout
<< "Found AL_EXT_STEREO_ANGLES" <<std::endl
;
1786 EnableWideStereo
= true;
1789 else if(strcmp(argv
[fileidx
], "-novideo") == 0)
1790 DisableVideo
= true;
1795 while(fileidx
< argc
&& !movState
)
1797 movState
= std::unique_ptr
<MovieState
>{new MovieState
{argv
[fileidx
++]}};
1798 if(!movState
->prepare()) movState
= nullptr;
1802 std::cerr
<< "Could not start a video" <<std::endl
;
1805 movState
->setTitle(screen
);
1807 /* Default to going to the next movie at the end of one. */
1808 enum class EomAction
{
1810 } eom_action
{EomAction::Next
};
1811 seconds last_time
{seconds::min()};
1815 int have_evt
{SDL_WaitEventTimeout(&event
, 10)};
1817 auto cur_time
= std::chrono::duration_cast
<seconds
>(movState
->getMasterClock());
1818 if(cur_time
!= last_time
)
1820 auto end_time
= std::chrono::duration_cast
<seconds
>(movState
->getDuration());
1821 std::cout
<< " \r "<<PrettyTime
{cur_time
}<<" / "<<PrettyTime
{end_time
} <<std::flush
;
1822 last_time
= cur_time
;
1825 bool force_redraw
{false};
1830 switch(event
.key
.keysym
.sym
)
1833 movState
->mQuit
= true;
1834 eom_action
= EomAction::Quit
;
1838 movState
->mQuit
= true;
1839 eom_action
= EomAction::Next
;
1847 case SDL_WINDOWEVENT
:
1848 switch(event
.window
.event
)
1850 case SDL_WINDOWEVENT_RESIZED
:
1851 SDL_SetRenderDrawColor(renderer
, 0, 0, 0, 255);
1852 SDL_RenderFillRect(renderer
, nullptr);
1853 force_redraw
= true;
1856 case SDL_WINDOWEVENT_EXPOSED
:
1857 force_redraw
= true;
1866 movState
->mQuit
= true;
1867 eom_action
= EomAction::Quit
;
1870 case FF_MOVIE_DONE_EVENT
:
1872 last_time
= seconds::min();
1873 if(eom_action
!= EomAction::Quit
)
1876 while(fileidx
< argc
&& !movState
)
1878 movState
= std::unique_ptr
<MovieState
>{new MovieState
{argv
[fileidx
++]}};
1879 if(!movState
->prepare()) movState
= nullptr;
1883 movState
->setTitle(screen
);
1888 /* Nothing more to play. Shut everything down and quit. */
1893 SDL_DestroyRenderer(renderer
);
1895 SDL_DestroyWindow(screen
);
1904 } while(SDL_PollEvent(&event
));
1906 movState
->mVideo
.updateVideo(screen
, renderer
, force_redraw
);
1909 std::cerr
<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl
;