Constify the AVCodec* returned by avcodec_find_decoder
[openal-soft.git] / examples / alffplay.cpp
blob806ef10cd8ed6432c1efe5cf0e24a8eaacb55217
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
4 * Requires C++14.
5 */
7 #include <condition_variable>
8 #include <functional>
9 #include <algorithm>
10 #include <iostream>
11 #include <utility>
12 #include <iomanip>
13 #include <cstdint>
14 #include <cstring>
15 #include <cstdlib>
16 #include <atomic>
17 #include <cerrno>
18 #include <chrono>
19 #include <cstdio>
20 #include <future>
21 #include <memory>
22 #include <string>
23 #include <thread>
24 #include <vector>
25 #include <array>
26 #include <cmath>
27 #include <deque>
28 #include <mutex>
29 #include <ratio>
31 extern "C" {
32 #ifdef __GNUC__
33 _Pragma("GCC diagnostic push")
34 _Pragma("GCC diagnostic ignored \"-Wconversion\"")
35 _Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
36 #endif
37 #include "libavcodec/avcodec.h"
38 #include "libavformat/avformat.h"
39 #include "libavformat/avio.h"
40 #include "libavformat/version.h"
41 #include "libavutil/avutil.h"
42 #include "libavutil/error.h"
43 #include "libavutil/frame.h"
44 #include "libavutil/mem.h"
45 #include "libavutil/pixfmt.h"
46 #include "libavutil/rational.h"
47 #include "libavutil/samplefmt.h"
48 #include "libavutil/time.h"
49 #include "libavutil/version.h"
50 #include "libavutil/channel_layout.h"
51 #include "libswscale/swscale.h"
52 #include "libswresample/swresample.h"
54 constexpr auto AVNoPtsValue = AV_NOPTS_VALUE;
55 constexpr auto AVErrorEOF = AVERROR_EOF;
57 struct SwsContext;
58 #ifdef __GNUC__
59 _Pragma("GCC diagnostic pop")
60 #endif
63 #include "SDL.h"
65 #include "AL/alc.h"
66 #include "AL/al.h"
67 #include "AL/alext.h"
69 #include "common/alhelpers.h"
71 extern "C" {
72 /* Undefine this to disable use of experimental extensions. Don't use for
73 * production code! Interfaces and behavior may change prior to being
74 * finalized.
76 #define ALLOW_EXPERIMENTAL_EXTS
78 #ifdef ALLOW_EXPERIMENTAL_EXTS
79 #ifndef AL_SOFT_callback_buffer
80 #define AL_SOFT_callback_buffer
81 typedef unsigned int ALbitfieldSOFT;
82 #define AL_BUFFER_CALLBACK_FUNCTION_SOFT 0x19A0
83 #define AL_BUFFER_CALLBACK_USER_PARAM_SOFT 0x19A1
84 typedef ALsizei (AL_APIENTRY*ALBUFFERCALLBACKTYPESOFT)(ALvoid *userptr, ALvoid *sampledata, ALsizei numbytes);
85 typedef void (AL_APIENTRY*LPALBUFFERCALLBACKSOFT)(ALuint buffer, ALenum format, ALsizei freq, ALBUFFERCALLBACKTYPESOFT callback, ALvoid *userptr, ALbitfieldSOFT flags);
86 typedef void (AL_APIENTRY*LPALGETBUFFERPTRSOFT)(ALuint buffer, ALenum param, ALvoid **value);
87 typedef void (AL_APIENTRY*LPALGETBUFFER3PTRSOFT)(ALuint buffer, ALenum param, ALvoid **value1, ALvoid **value2, ALvoid **value3);
88 typedef void (AL_APIENTRY*LPALGETBUFFERPTRVSOFT)(ALuint buffer, ALenum param, ALvoid **values);
89 #endif
90 #ifndef AL_SOFT_UHJ
91 #define AL_SOFT_UHJ
92 #define AL_FORMAT_UHJ2CHN8_SOFT 0x19A2
93 #define AL_FORMAT_UHJ2CHN16_SOFT 0x19A3
94 #define AL_FORMAT_UHJ2CHN_FLOAT32_SOFT 0x19A4
95 #define AL_FORMAT_UHJ3CHN8_SOFT 0x19A5
96 #define AL_FORMAT_UHJ3CHN16_SOFT 0x19A6
97 #define AL_FORMAT_UHJ3CHN_FLOAT32_SOFT 0x19A7
98 #define AL_FORMAT_UHJ4CHN8_SOFT 0x19A8
99 #define AL_FORMAT_UHJ4CHN16_SOFT 0x19A9
100 #define AL_FORMAT_UHJ4CHN_FLOAT32_SOFT 0x19AA
101 #define AL_STEREO_MODE_SOFT 0x19B0
102 #define AL_NORMAL_SOFT 0x0000
103 #define AL_SUPER_STEREO_SOFT 0x0001
104 #define AL_SUPER_STEREO_WIDTH_SOFT 0x19B1
105 #endif
106 #endif /* ALLOW_EXPERIMENTAL_EXTS */
/* Anonymous namespace: file-local constants, tunables, and lazily-loaded AL
 * extension function pointers used throughout the player. */
109 namespace {
/* Integer-literal suffix so 1_i64<<32 below is computed in 64 bits. */
111 inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept { return static_cast<int64_t>(n); }
113 #ifndef M_PI
114 #define M_PI (3.14159265358979323846)
115 #endif
/* 32.32 fixed-point duration, matching AL_SAMPLE_OFFSET_LATENCY_SOFT's
 * fixed-point sample offset representation. */
117 using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1_i64<<32)>>;
118 using nanoseconds = std::chrono::nanoseconds;
119 using microseconds = std::chrono::microseconds;
120 using milliseconds = std::chrono::milliseconds;
121 using seconds = std::chrono::seconds;
122 using seconds_d64 = std::chrono::duration<double>;
123 using std::chrono::duration_cast;
125 const std::string AppName{"alffplay"};
/* Playback options, presumably set from the command line (parsing not visible
 * in this chunk). */
127 ALenum DirectOutMode{AL_FALSE};
128 bool EnableWideStereo{false};
129 bool EnableSuperStereo{false};
130 bool DisableVideo{false};
/* AL/ALC extension entry points; null until loaded, so callers must check. */
131 LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
132 LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
133 LPALEVENTCONTROLSOFT alEventControlSOFT;
134 LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
136 #ifdef AL_SOFT_callback_buffer
137 LPALBUFFERCALLBACKSOFT alBufferCallbackSOFT;
138 #endif
/* Stereo formats actually used for output; may be swapped for UHJ variants
 * elsewhere (not visible here). */
139 ALenum FormatStereo8{AL_FORMAT_STEREO8};
140 ALenum FormatStereo16{AL_FORMAT_STEREO16};
141 ALenum FormatStereo32F{AL_FORMAT_STEREO_FLOAT32};
/* If the clock difference gets this large, give up on syncing entirely. */
143 const seconds AVNoSyncThreshold{10};
145 #define VIDEO_PICTURE_QUEUE_SIZE 24
147 const seconds_d64 AudioSyncThreshold{0.03};
148 const milliseconds AudioSampleCorrectionMax{50};
149 /* Averaging filter coefficient for audio sync. */
150 #define AUDIO_DIFF_AVG_NB 20
151 const double AudioAvgFilterCoeff{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB)};
152 /* Per-buffer size, in time */
153 constexpr milliseconds AudioBufferTime{20};
154 /* Buffer total size, in time (should be divisible by the buffer time) */
155 constexpr milliseconds AudioBufferTotalTime{800};
156 constexpr auto AudioBufferCount = AudioBufferTotalTime / AudioBufferTime;
/* Custom SDL event pushed when the movie finishes. */
158 enum {
159 FF_MOVIE_DONE_EVENT = SDL_USEREVENT
/* Which stream drives the master clock for A/V sync. */
162 enum class SyncMaster {
163 Audio,
164 Video,
165 External,
167 Default = Audio
/* ffmpeg's monotonic clock (av_gettime) as a chrono duration. */
171 inline microseconds get_avtime()
172 { return microseconds{av_gettime()}; }
174 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
/* Each deleter calls the matching ffmpeg free/close function. Note most of
 * them take the pointer's address (the ffmpeg API nulls the caller's pointer);
 * sws_freeContext is the exception and takes the pointer by value. */
175 struct AVIOContextDeleter {
176 void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
178 using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;
180 struct AVFormatCtxDeleter {
181 void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
183 using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;
185 struct AVCodecCtxDeleter {
186 void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
188 using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;
190 struct AVPacketDeleter {
191 void operator()(AVPacket *pkt) { av_packet_free(&pkt); }
193 using AVPacketPtr = std::unique_ptr<AVPacket,AVPacketDeleter>;
195 struct AVFrameDeleter {
196 void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
198 using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;
200 struct SwrContextDeleter {
201 void operator()(SwrContext *ptr) { swr_free(&ptr); }
203 using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;
205 struct SwsContextDeleter {
206 void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
208 using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
/* A bounded (by total packet bytes, SizeLimit) queue of demuxed AVPackets,
 * feeding a decoder. The demuxer thread calls put()/setFinished(); the decoder
 * thread calls sendPacket()/receiveFrame(). Two mutexes are used: mPacketMutex
 * guards the packet deque, mFrameMutex serializes codec send/receive so the
 * two sides can nudge each other via the In/Out frame condvars when the codec
 * reports EAGAIN. */
211 template<size_t SizeLimit>
212 class DataQueue {
213 std::mutex mPacketMutex, mFrameMutex;
214 std::condition_variable mPacketCond;
215 std::condition_variable mInFrameCond, mOutFrameCond;
217 std::deque<AVPacketPtr> mPackets;
218 size_t mTotalSize{0};
219 bool mFinished{false};
/* Blocks until a packet is available or the queue is finished; returns null
 * only when finished and drained. */
221 AVPacketPtr getPacket()
223 std::unique_lock<std::mutex> plock{mPacketMutex};
224 while(mPackets.empty() && !mFinished)
225 mPacketCond.wait(plock);
226 if(mPackets.empty())
227 return nullptr;
229 auto ret = std::move(mPackets.front());
230 mPackets.pop_front();
231 mTotalSize -= static_cast<unsigned int>(ret->size);
232 return ret;
235 public:
/* Feeds the next packet (or a null flush packet when finished) to the codec.
 * Retries on EAGAIN, waiting up to 50ms for receiveFrame() to make room.
 * Returns 0 on success, AVErrorEOF once the flush packet is accepted, or a
 * negative ffmpeg error. */
236 int sendPacket(AVCodecContext *codecctx)
238 AVPacketPtr packet{getPacket()};
240 int ret{};
242 std::unique_lock<std::mutex> flock{mFrameMutex};
243 while((ret=avcodec_send_packet(codecctx, packet.get())) == AVERROR(EAGAIN))
244 mInFrameCond.wait_for(flock, milliseconds{50});
246 mOutFrameCond.notify_one();
248 if(!packet)
/* A successfully-sent null packet means the codec is now flushing. */
250 if(!ret) return AVErrorEOF;
251 std::cerr<< "Failed to send flush packet: "<<ret <<std::endl;
252 return ret;
254 if(ret < 0)
255 std::cerr<< "Failed to send packet: "<<ret <<std::endl;
256 return ret;
/* Pulls the next decoded frame, waiting (in 50ms slices) while the codec
 * needs more input from sendPacket(). Returns the avcodec_receive_frame
 * result: 0 on success, or a negative error (e.g. AVERROR_EOF). */
259 int receiveFrame(AVCodecContext *codecctx, AVFrame *frame)
261 int ret{};
263 std::unique_lock<std::mutex> flock{mFrameMutex};
264 while((ret=avcodec_receive_frame(codecctx, frame)) == AVERROR(EAGAIN))
265 mOutFrameCond.wait_for(flock, milliseconds{50});
267 mInFrameCond.notify_one();
268 return ret;
/* Marks end-of-stream; getPacket() will drain the remaining packets then
 * return null. */
271 void setFinished()
274 std::lock_guard<std::mutex> _{mPacketMutex};
275 mFinished = true;
277 mPacketCond.notify_one();
/* Marks finished AND drops any queued packets immediately. */
280 void flush()
283 std::lock_guard<std::mutex> _{mPacketMutex};
284 mFinished = true;
286 mPackets.clear();
287 mTotalSize = 0;
289 mPacketCond.notify_one();
/* Enqueues a ref to pkt. Returns false only when the byte limit is reached
 * or the queue is finished (caller should retry later / stop). NOTE(review):
 * a failed av_packet_ref returns true after dropping the entry — presumably
 * deliberate "pretend we consumed it" behavior; confirm against the demuxer
 * loop. */
292 bool put(const AVPacket *pkt)
295 std::unique_lock<std::mutex> lock{mPacketMutex};
296 if(mTotalSize >= SizeLimit || mFinished)
297 return false;
299 mPackets.push_back(AVPacketPtr{av_packet_alloc()});
300 if(av_packet_ref(mPackets.back().get(), pkt) != 0)
302 mPackets.pop_back();
303 return true;
306 mTotalSize += static_cast<unsigned int>(mPackets.back()->size);
308 mPacketCond.notify_one();
309 return true;
314 struct MovieState;
/* Per-movie audio side: decodes the audio stream, converts with swresample,
 * and feeds OpenAL either via queued buffers or (with AL_SOFT_callback_buffer)
 * a lock-free single-producer/single-consumer ring buffer. */
316 struct AudioState {
317 MovieState &mMovie;
319 AVStream *mStream{nullptr};
320 AVCodecCtxPtr mCodecCtx;
/* Demuxed audio packets, limited to 2MiB of packet data. */
322 DataQueue<2*1024*1024> mQueue;
324 /* Used for clock difference average computation */
325 seconds_d64 mClockDiffAvg{0};
327 /* Time of the next sample to be buffered */
328 nanoseconds mCurrentPts{0};
330 /* Device clock time that the stream started at. */
331 nanoseconds mDeviceStartTime{nanoseconds::min()};
333 /* Decompressed sample frame, and swresample context for conversion */
334 AVFramePtr mDecodedFrame;
335 SwrContextPtr mSwresCtx;
337 /* Conversion format, for what gets fed to OpenAL */
338 uint64_t mDstChanLayout{0};
339 AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};
341 /* Storage of converted samples */
342 uint8_t *mSamples{nullptr};
343 int mSamplesLen{0}; /* In samples */
344 int mSamplesPos{0};
345 int mSamplesMax{0};
/* Ring buffer for the callback-buffer path; read by the AL mixer thread via
 * bufferCallback, written by the decode thread. */
347 std::unique_ptr<uint8_t[]> mBufferData;
348 size_t mBufferDataSize{0};
349 std::atomic<size_t> mReadPos{0};
350 std::atomic<size_t> mWritePos{0};
352 /* OpenAL format */
353 ALenum mFormat{AL_NONE};
354 ALuint mFrameSize{0};
356 std::mutex mSrcMutex;
357 std::condition_variable mSrcCond;
/* Cleared by the DISCONNECTED event to tell the handler the device died. */
358 std::atomic_flag mConnected;
359 ALuint mSource{0};
360 std::array<ALuint,AudioBufferCount> mBuffers{};
361 ALuint mBufferIdx{0};
363 AudioState(MovieState &movie) : mMovie(movie)
364 { mConnected.test_and_set(std::memory_order_relaxed); }
365 ~AudioState()
367 if(mSource)
368 alDeleteSources(1, &mSource);
369 if(mBuffers[0])
370 alDeleteBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());
372 av_freep(&mSamples);
/* Static trampolines forwarding C callbacks to the member functions. */
375 static void AL_APIENTRY eventCallbackC(ALenum eventType, ALuint object, ALuint param,
376 ALsizei length, const ALchar *message, void *userParam)
377 { static_cast<AudioState*>(userParam)->eventCallback(eventType, object, param, length, message); }
378 void eventCallback(ALenum eventType, ALuint object, ALuint param, ALsizei length,
379 const ALchar *message);
381 #ifdef AL_SOFT_callback_buffer
382 static ALsizei AL_APIENTRY bufferCallbackC(void *userptr, void *data, ALsizei size)
383 { return static_cast<AudioState*>(userptr)->bufferCallback(data, size); }
384 ALsizei bufferCallback(void *data, ALsizei size);
385 #endif
387 nanoseconds getClockNoLock();
388 nanoseconds getClock()
390 std::lock_guard<std::mutex> lock{mSrcMutex};
391 return getClockNoLock();
394 bool startPlayback();
396 int getSync();
397 int decodeFrame();
398 bool readAudio(uint8_t *samples, unsigned int length, int &sample_skip);
399 bool readAudio(int sample_skip);
401 int handler();
/* Per-movie video side: decodes the video stream, converts with swscale, and
 * presents frames through an SDL texture, pacing display against the master
 * clock. */
404 struct VideoState {
405 MovieState &mMovie;
407 AVStream *mStream{nullptr};
408 AVCodecCtxPtr mCodecCtx;
/* Demuxed video packets, limited to 14MiB of packet data. */
410 DataQueue<14*1024*1024> mQueue;
412 /* The pts of the currently displayed frame, and the time (av_gettime) it
413 * was last updated - used to have running video pts
415 nanoseconds mDisplayPts{0};
416 microseconds mDisplayPtsTime{microseconds::min()};
417 std::mutex mDispPtsMutex;
419 /* Swscale context for format conversion */
420 SwsContextPtr mSwscaleCtx;
/* Decoded-picture ring: decoder writes at mPictQWrite, presenter reads at
 * mPictQRead. */
422 struct Picture {
423 AVFramePtr mFrame{};
424 nanoseconds mPts{nanoseconds::min()};
426 std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
427 std::atomic<size_t> mPictQRead{0u}, mPictQWrite{1u};
428 std::mutex mPictQMutex;
429 std::condition_variable mPictQCond;
431 SDL_Texture *mImage{nullptr};
432 int mWidth{0}, mHeight{0}; /* Full texture size */
433 bool mFirstUpdate{true};
435 std::atomic<bool> mEOS{false};
436 std::atomic<bool> mFinalUpdate{false};
438 VideoState(MovieState &movie) : mMovie(movie) { }
439 ~VideoState()
441 if(mImage)
442 SDL_DestroyTexture(mImage);
443 mImage = nullptr;
446 nanoseconds getClock();
448 void display(SDL_Window *screen, SDL_Renderer *renderer, AVFrame *frame);
449 void updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw);
450 int handler();
/* Top-level state for one movie: owns the demuxer contexts, the audio/video
 * sub-states, and the parse/audio/video threads. */
453 struct MovieState {
454 AVIOContextPtr mIOContext;
455 AVFormatCtxPtr mFormatCtx;
457 SyncMaster mAVSyncType{SyncMaster::Default};
/* Stream start time used as the zero point for the external clock. */
459 microseconds mClockBase{microseconds::min()};
461 std::atomic<bool> mQuit{false};
463 AudioState mAudio;
464 VideoState mVideo;
/* Handshake so the parse thread can signal when stream setup is done. */
466 std::mutex mStartupMutex;
467 std::condition_variable mStartupCond;
468 bool mStartupDone{false};
470 std::thread mParseThread;
471 std::thread mAudioThread;
472 std::thread mVideoThread;
474 std::string mFilename;
476 MovieState(std::string fname)
477 : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
479 ~MovieState()
481 stop();
482 if(mParseThread.joinable())
483 mParseThread.join();
/* avio interrupt callback; ctx is the MovieState (lets a quit request abort
 * blocking I/O). */
486 static int decode_interrupt_cb(void *ctx);
487 bool prepare();
488 void setTitle(SDL_Window *window);
489 void stop();
/* Clocks: getClock is the external/wall reference, getMasterClock selects
 * per mAVSyncType. */
491 nanoseconds getClock();
493 nanoseconds getMasterClock();
495 nanoseconds getDuration();
497 int streamComponentOpen(unsigned int stream_index);
498 int parse_handler();
/* Returns the audio clock (timestamp of the sample currently being heard),
 * choosing the most accurate method available:
 *   1) device clock + latency (ALC_SOFT_device_clock),
 *   2) first-played-sample time + source offset (callback ring buffer),
 *   3) mCurrentPts minus the queued-buffer backlog (plain buffer queue).
 * Caller must hold mSrcMutex (see getClock). */
502 nanoseconds AudioState::getClockNoLock()
504 // The audio clock is the timestamp of the sample currently being heard.
505 if(alcGetInteger64vSOFT)
507 // If device start time = min, we aren't playing yet.
508 if(mDeviceStartTime == nanoseconds::min())
509 return nanoseconds::zero();
511 // Get the current device clock time and latency.
512 auto device = alcGetContextsDevice(alcGetCurrentContext());
513 ALCint64SOFT devtimes[2]{0,0};
514 alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
515 auto latency = nanoseconds{devtimes[1]};
516 auto device_time = nanoseconds{devtimes[0]};
518 // The clock is simply the current device time relative to the recorded
// start time. We can also subtract the latency to get a more accurate
520 // position of where the audio device actually is in the output stream.
521 return device_time - mDeviceStartTime - latency;
524 if(mBufferDataSize > 0)
526 if(mDeviceStartTime == nanoseconds::min())
527 return nanoseconds::zero();
529 /* With a callback buffer and no device clock, mDeviceStartTime is
530 * actually the timestamp of the first sample frame played. The audio
531 * clock, then, is that plus the current source offset.
/* offset[0] is a 32.32 fixed-point sample offset, offset[1] latency in ns.
 * Without the latency extension, fake it from AL_SAMPLE_OFFSET. */
533 ALint64SOFT offset[2];
534 if(alGetSourcei64vSOFT)
535 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
536 else
538 ALint ioffset;
539 alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
540 offset[0] = ALint64SOFT{ioffset} << 32;
541 offset[1] = 0;
543 /* NOTE: The source state must be checked last, in case an underrun
544 * occurs and the source stops between getting the state and retrieving
545 * the offset+latency.
547 ALint status;
548 alGetSourcei(mSource, AL_SOURCE_STATE, &status);
550 nanoseconds pts{};
551 if(status == AL_PLAYING || status == AL_PAUSED)
552 pts = mDeviceStartTime - nanoseconds{offset[1]} +
553 duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
554 else
556 /* If the source is stopped, the pts of the next sample to be heard
557 * is the pts of the next sample to be buffered, minus the amount
558 * already in the buffer ready to play.
/* readable = bytes currently in the ring buffer (woffset may have wrapped
 * behind roffset). */
560 const size_t woffset{mWritePos.load(std::memory_order_acquire)};
561 const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
562 const size_t readable{((woffset >= roffset) ? woffset : (mBufferDataSize+woffset)) -
563 roffset};
565 pts = mCurrentPts - nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
568 return pts;
571 /* The source-based clock is based on 4 components:
572 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
573 * 2 - The length of the source's buffer queue
574 * (AudioBufferTime*AL_BUFFERS_QUEUED)
575 * 3 - The offset OpenAL is currently at in the source (the first value
576 * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
577 * 4 - The latency between OpenAL and the DAC (the second value from
578 * AL_SAMPLE_OFFSET_LATENCY_SOFT)
580 * Subtracting the length of the source queue from the next sample's
581 * timestamp gives the timestamp of the sample at the start of the source
582 * queue. Adding the source offset to that results in the timestamp for the
583 * sample at OpenAL's current position, and subtracting the source latency
584 * from that gives the timestamp of the sample currently at the DAC.
586 nanoseconds pts{mCurrentPts};
587 if(mSource)
589 ALint64SOFT offset[2];
590 if(alGetSourcei64vSOFT)
591 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
592 else
594 ALint ioffset;
595 alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
596 offset[0] = ALint64SOFT{ioffset} << 32;
597 offset[1] = 0;
599 ALint queued, status;
600 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
601 alGetSourcei(mSource, AL_SOURCE_STATE, &status);
603 /* If the source is AL_STOPPED, then there was an underrun and all
604 * buffers are processed, so ignore the source queue. The audio thread
605 * will put the source into an AL_INITIAL state and clear the queue
606 * when it starts recovery.
608 if(status != AL_STOPPED)
610 pts -= AudioBufferTime*queued;
611 pts += duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
613 /* Don't offset by the latency if the source isn't playing. */
614 if(status == AL_PLAYING)
615 pts -= nanoseconds{offset[1]};
/* Clamp: rounding/latency can push the estimate slightly negative. */
618 return std::max(pts, nanoseconds::zero());
/* Starts the source playing and records mDeviceStartTime so getClockNoLock()
 * can derive the audio clock. Returns false when there's nothing buffered yet
 * (no ring-buffer data, or no queued AL buffers). */
622 bool AudioState::startPlayback()
/* Bytes currently readable in the ring buffer (write index may have wrapped). */
623 const size_t woffset{mWritePos.load(std::memory_order_acquire)};
624 const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
625 const size_t readable{((woffset >= roffset) ? woffset : (mBufferDataSize+woffset)) -
626 roffset};
628 if(mBufferDataSize > 0)
630 if(readable == 0)
631 return false;
/* Without a device clock, treat mDeviceStartTime as the pts of the first
 * sample played (next-to-buffer pts minus what's already buffered). */
632 if(!alcGetInteger64vSOFT)
633 mDeviceStartTime = mCurrentPts -
634 nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
636 else
638 ALint queued{};
639 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
640 if(queued == 0) return false;
643 alSourcePlay(mSource);
644 if(alcGetInteger64vSOFT)
646 /* Subtract the total buffer queue time from the current pts to get the
647 * pts of the start of the queue.
649 int64_t srctimes[2]{0,0};
650 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
651 auto device_time = nanoseconds{srctimes[1]};
652 auto src_offset = duration_cast<nanoseconds>(fixed32{srctimes[0]}) /
653 mCodecCtx->sample_rate;
655 /* The mixer may have ticked and incremented the device time and sample
656 * offset, so subtract the source offset from the device time to get
657 * the device time the source started at. Also subtract startpts to get
658 * the device time the stream would have started at to reach where it
659 * is now.
661 if(mBufferDataSize > 0)
663 nanoseconds startpts{mCurrentPts -
664 nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate};
665 mDeviceStartTime = device_time - src_offset - startpts;
667 else
/* Queue path: the full AudioBufferTotalTime worth of buffers was queued. */
669 nanoseconds startpts{mCurrentPts - AudioBufferTotalTime};
670 mDeviceStartTime = device_time - src_offset - startpts;
673 return true;
/* Returns how many sample frames the audio should skip (positive) or
 * duplicate (negative) to converge on the master clock. Returns 0 when audio
 * is itself the master, when the drift is too large to correct (resets the
 * running average), or when the averaged drift is within threshold. */
676 int AudioState::getSync()
678 if(mMovie.mAVSyncType == SyncMaster::Audio)
679 return 0;
681 auto ref_clock = mMovie.getMasterClock();
682 auto diff = ref_clock - getClockNoLock();
684 if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
686 /* Difference is TOO big; reset accumulated average */
687 mClockDiffAvg = seconds_d64::zero();
688 return 0;
691 /* Accumulate the diffs */
692 mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
693 auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
/* NOTE(review): the dead zone is asymmetric (+Threshold/2 vs -Threshold) —
 * presumably skipping ahead is preferred over duplicating; confirm intent. */
694 if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
695 return 0;
697 /* Constrain the per-update difference to avoid exceedingly large skips */
698 diff = std::min<nanoseconds>(diff, AudioSampleCorrectionMax);
699 return static_cast<int>(duration_cast<seconds>(diff*mCodecCtx->sample_rate).count());
/* Decodes the next non-empty audio frame, updates mCurrentPts from its
 * best-effort timestamp, and converts it into mSamples (growing the buffer as
 * needed). Returns the number of converted sample frames, or 0 at EOF. */
702 int AudioState::decodeFrame()
704 do {
705 while(int ret{mQueue.receiveFrame(mCodecCtx.get(), mDecodedFrame.get())})
707 if(ret == AVErrorEOF) return 0;
708 std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
710 } while(mDecodedFrame->nb_samples <= 0);
712 /* If provided, update w/ pts */
713 if(mDecodedFrame->best_effort_timestamp != AVNoPtsValue)
714 mCurrentPts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
715 static_cast<double>(mDecodedFrame->best_effort_timestamp)});
/* Grow the conversion buffer to fit the largest frame seen so far. */
717 if(mDecodedFrame->nb_samples > mSamplesMax)
719 av_freep(&mSamples);
720 av_samples_alloc(&mSamples, nullptr, mCodecCtx->channels, mDecodedFrame->nb_samples,
721 mDstSampleFmt, 0);
722 mSamplesMax = mDecodedFrame->nb_samples;
724 /* Return the amount of sample frames converted */
725 int data_size{swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
726 const_cast<const uint8_t**>(mDecodedFrame->data), mDecodedFrame->nb_samples)};
728 av_frame_unref(mDecodedFrame.get());
729 return data_size;
/* Duplicates the sample frame at `in` into `out`, `count` times. The frame
 * size must be a multiple of sizeof(T); T is the unit used for copying. */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, size_t count, size_t frame_size)
{
    auto *src = reinterpret_cast<const T*>(in);
    auto *dst = reinterpret_cast<T*>(out);

    if(frame_size == sizeof(T))
    {
        /* One T per frame: a plain fill repeats the sample. */
        std::fill_n(dst, count, *src);
        return;
    }

    /* Multiple Ts per frame: emit count repetitions of the frame, cycling
     * through its sizeof(T)-sized units. NOTE: frame_size is a multiple of
     * sizeof(T). */
    const size_t units_per_frame{frame_size / sizeof(T)};
    size_t unit{0};
    auto next_unit = [src,units_per_frame,&unit]() -> T
    {
        const T value{src[unit]};
        unit = (unit+1)%units_per_frame;
        return value;
    };
    std::generate_n(dst, count*units_per_frame, next_unit);
}
/* Fills `samples` with exactly `length` bytes of converted audio for the
 * buffer-queue path. A negative mSamplesPos means that many frames must be
 * duplicated (audio is ahead); sample_skip drops frames (audio is behind).
 * Pads with silence if the stream runs dry. Returns false only when nothing
 * at all could be read. */
759 bool AudioState::readAudio(uint8_t *samples, unsigned int length, int &sample_skip)
761 unsigned int audio_size{0};
763 /* Read the next chunk of data, refill the buffer, and queue it
764 * on the source */
765 length /= mFrameSize;
766 while(mSamplesLen > 0 && audio_size < length)
768 unsigned int rem{length - audio_size};
769 if(mSamplesPos >= 0)
/* Normal case: copy remaining frames from the converted buffer. */
771 const auto len = static_cast<unsigned int>(mSamplesLen - mSamplesPos);
772 if(rem > len) rem = len;
773 std::copy_n(mSamples + static_cast<unsigned int>(mSamplesPos)*mFrameSize,
774 rem*mFrameSize, samples);
776 else
778 rem = std::min(rem, static_cast<unsigned int>(-mSamplesPos));
780 /* Add samples by copying the first sample */
/* Pick the widest copy unit that divides the frame size. */
781 if((mFrameSize&7) == 0)
782 sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
783 else if((mFrameSize&3) == 0)
784 sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
785 else if((mFrameSize&1) == 0)
786 sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
787 else
788 sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
791 mSamplesPos += rem;
792 mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
793 samples += rem*mFrameSize;
794 audio_size += rem;
/* Current frame exhausted: decode more, applying any pending skip. */
796 while(mSamplesPos >= mSamplesLen)
798 mSamplesLen = decodeFrame();
799 mSamplesPos = std::min(mSamplesLen, sample_skip);
800 if(mSamplesLen <= 0) break;
802 sample_skip -= mSamplesPos;
804 // Adjust the device start time and current pts by the amount we're
805 // skipping/duplicating, so that the clock remains correct for the
806 // current stream position.
807 auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
808 mDeviceStartTime -= skip;
809 mCurrentPts += skip;
810 continue;
813 if(audio_size <= 0)
814 return false;
/* Stream dried up mid-buffer: pad the rest with silence (0x80 is the
 * unsigned-8-bit zero level). */
816 if(audio_size < length)
818 const unsigned int rem{length - audio_size};
819 std::fill_n(samples, rem*mFrameSize,
820 (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
821 mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
822 audio_size += rem;
824 return true;
/* Ring-buffer variant of readAudio, for the callback-buffer path: writes
 * converted samples into mBufferData until it's full or the decoder runs dry.
 * The writer always leaves one byte free so woffset==roffset means "empty".
 * Returns false when the stream ends (decodeFrame() <= 0). */
827 bool AudioState::readAudio(int sample_skip)
829 size_t woffset{mWritePos.load(std::memory_order_acquire)};
830 while(mSamplesLen > 0)
832 const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
834 if(mSamplesPos < 0)
/* Duplicate frames to cover a negative position, up to the contiguous
 * writable span (stopping one byte short of the read position). */
836 size_t rem{(((roffset > woffset) ? roffset-1
837 : ((roffset == 0) ? mBufferDataSize-1
838 : mBufferDataSize)) - woffset) / mFrameSize};
839 rem = std::min<size_t>(rem, static_cast<ALuint>(-mSamplesPos));
840 if(rem == 0) break;
842 auto *splout{&mBufferData[woffset]};
843 if((mFrameSize&7) == 0)
844 sample_dup<uint64_t>(splout, mSamples, rem, mFrameSize);
845 else if((mFrameSize&3) == 0)
846 sample_dup<uint32_t>(splout, mSamples, rem, mFrameSize);
847 else if((mFrameSize&1) == 0)
848 sample_dup<uint16_t>(splout, mSamples, rem, mFrameSize);
849 else
850 sample_dup<uint8_t>(splout, mSamples, rem, mFrameSize);
851 woffset += rem * mFrameSize;
852 if(woffset == mBufferDataSize)
853 woffset = 0;
/* Publish after the data is in place (release pairs with the reader's
 * acquire in bufferCallback). */
854 mWritePos.store(woffset, std::memory_order_release);
855 mSamplesPos += static_cast<int>(rem);
856 mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
857 continue;
/* Copy the rest of the current converted frame, all-or-nothing: if it
 * doesn't fit in the writable space, stop and let the caller retry. */
860 const size_t boffset{static_cast<ALuint>(mSamplesPos) * size_t{mFrameSize}};
861 const size_t nbytes{static_cast<ALuint>(mSamplesLen)*size_t{mFrameSize} -
862 boffset};
863 if(roffset > woffset)
865 const size_t writable{roffset-woffset-1};
866 if(writable < nbytes) break;
868 memcpy(&mBufferData[woffset], mSamples+boffset, nbytes);
869 woffset += nbytes;
871 else
873 const size_t writable{mBufferDataSize+roffset-woffset-1};
874 if(writable < nbytes) break;
/* Write may wrap: split into tail and head portions. */
876 const size_t todo1{std::min<size_t>(nbytes, mBufferDataSize-woffset)};
877 const size_t todo2{nbytes - todo1};
879 memcpy(&mBufferData[woffset], mSamples+boffset, todo1);
880 woffset += todo1;
881 if(woffset == mBufferDataSize)
883 woffset = 0;
884 if(todo2 > 0)
886 memcpy(&mBufferData[woffset], mSamples+boffset+todo1, todo2);
887 woffset += todo2;
891 mWritePos.store(woffset, std::memory_order_release);
892 mCurrentPts += nanoseconds{seconds{mSamplesLen-mSamplesPos}} / mCodecCtx->sample_rate;
/* Refill from the decoder, applying any pending skip and compensating the
 * clock for skipped/duplicated frames. */
894 do {
895 mSamplesLen = decodeFrame();
896 mSamplesPos = std::min(mSamplesLen, sample_skip);
897 if(mSamplesLen <= 0) return false;
899 sample_skip -= mSamplesPos;
901 auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
902 mDeviceStartTime -= skip;
903 mCurrentPts += skip;
904 } while(mSamplesPos >= mSamplesLen);
907 return true;
/* AL event handler (runs on an OpenAL-internal thread). Buffer-completed
 * events just wake the audio handler; other events are logged, and a
 * disconnect clears mConnected so the handler can attempt recovery. */
911 void AL_APIENTRY AudioState::eventCallback(ALenum eventType, ALuint object, ALuint param,
912 ALsizei length, const ALchar *message)
914 if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
916 /* Temporarily lock the source mutex to ensure it's not between
917 * checking the processed count and going to sleep.
/* Lock+unlock (no work inside) just fences against the handler's
 * check-then-wait race before notifying. */
919 std::unique_lock<std::mutex>{mSrcMutex}.unlock();
920 mSrcCond.notify_one();
921 return;
924 std::cout<< "\n---- AL Event on AudioState "<<this<<" ----\nEvent: ";
925 switch(eventType)
927 case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
928 case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
929 case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
930 default:
931 std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<std::dec<<
932 std::setw(0)<<std::setfill(' '); break;
934 std::cout<< "\n"
935 "Object ID: "<<object<<"\n"
936 "Parameter: "<<param<<"\n"
937 "Message: "<<std::string{message, static_cast<ALuint>(length)}<<"\n----"<<
938 std::endl;
940 if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
/* Mark the device lost under the lock, then wake the handler. */
943 std::lock_guard<std::mutex> lock{mSrcMutex};
944 mConnected.clear(std::memory_order_release);
946 mSrcCond.notify_one();
950 #ifdef AL_SOFT_callback_buffer
/* AL_SOFT_callback_buffer consumer (runs on the mixer thread): copies up to
 * `size` bytes from the ring buffer into `data`. Returns the bytes actually
 * provided; short reads occur when the writer hasn't produced enough yet.
 * Lock-free: only mReadPos is advanced here (release pairs with the writer's
 * acquire in readAudio). */
951 ALsizei AudioState::bufferCallback(void *data, ALsizei size)
953 ALsizei got{0};
955 size_t roffset{mReadPos.load(std::memory_order_acquire)};
956 while(got < size)
958 const size_t woffset{mWritePos.load(std::memory_order_relaxed)};
959 if(woffset == roffset) break;
/* Contiguous readable span: up to the write position, or to the end of
 * the buffer when the writer has wrapped. */
961 size_t todo{((woffset < roffset) ? mBufferDataSize : woffset) - roffset};
962 todo = std::min<size_t>(todo, static_cast<ALuint>(size-got));
964 memcpy(data, &mBufferData[roffset], todo);
965 data = static_cast<ALbyte*>(data) + todo;
966 got += static_cast<ALsizei>(todo);
968 roffset += todo;
969 if(roffset == mBufferDataSize)
970 roffset = 0;
972 mReadPos.store(roffset, std::memory_order_release);
974 return got;
976 #endif
978 int AudioState::handler()
980 std::unique_lock<std::mutex> srclock{mSrcMutex, std::defer_lock};
981 milliseconds sleep_time{AudioBufferTime / 3};
983 struct EventControlManager {
984 const std::array<ALenum,3> evt_types{{
985 AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
986 AL_EVENT_TYPE_DISCONNECTED_SOFT}};
988 EventControlManager(milliseconds &sleep_time)
990 if(alEventControlSOFT)
992 alEventControlSOFT(static_cast<ALsizei>(evt_types.size()), evt_types.data(),
993 AL_TRUE);
994 alEventCallbackSOFT(&AudioState::eventCallbackC, this);
995 sleep_time = AudioBufferTotalTime;
998 ~EventControlManager()
1000 if(alEventControlSOFT)
1002 alEventControlSOFT(static_cast<ALsizei>(evt_types.size()), evt_types.data(),
1003 AL_FALSE);
1004 alEventCallbackSOFT(nullptr, nullptr);
1008 EventControlManager event_controller{sleep_time};
1010 const bool has_bfmt_ex{alIsExtensionPresent("AL_SOFT_bformat_ex") != AL_FALSE};
1011 ALenum ambi_layout{AL_FUMA_SOFT};
1012 ALenum ambi_scale{AL_FUMA_SOFT};
1014 std::unique_ptr<uint8_t[]> samples;
1015 ALsizei buffer_len{0};
1017 /* Find a suitable format for OpenAL. */
1018 mDstChanLayout = 0;
1019 mFormat = AL_NONE;
1020 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP
1021 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_DBL
1022 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_DBLP
1023 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S32
1024 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S32P
1025 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S64
1026 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S64P)
1027 && alIsExtensionPresent("AL_EXT_FLOAT32"))
1029 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
1030 mFrameSize = 4;
1031 if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
1033 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1)
1035 mDstChanLayout = mCodecCtx->channel_layout;
1036 mFrameSize *= 8;
1037 mFormat = alGetEnumValue("AL_FORMAT_71CHN32");
1039 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1
1040 || mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK)
1042 mDstChanLayout = mCodecCtx->channel_layout;
1043 mFrameSize *= 6;
1044 mFormat = alGetEnumValue("AL_FORMAT_51CHN32");
1046 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_QUAD)
1048 mDstChanLayout = mCodecCtx->channel_layout;
1049 mFrameSize *= 4;
1050 mFormat = alGetEnumValue("AL_FORMAT_QUAD32");
1053 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
1055 mDstChanLayout = mCodecCtx->channel_layout;
1056 mFrameSize *= 1;
1057 mFormat = AL_FORMAT_MONO_FLOAT32;
1059 /* Assume 3D B-Format (ambisonics) if the channel layout is blank and
1060 * there's 4 or more channels. FFmpeg/libavcodec otherwise seems to
1061 * have no way to specify if the source is actually B-Format (let alone
1062 * if it's 2D or 3D).
1064 if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4
1065 && alIsExtensionPresent("AL_EXT_BFORMAT"))
1067 /* Calculate what should be the ambisonic order from the number of
1068 * channels, and confirm that's the number of channels. Opus allows
1069 * an optional non-diegetic stereo stream with the B-Format stream,
1070 * which we can ignore, so check for that too.
1072 auto order = static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1;
1073 int channels{(order+1) * (order+1)};
1074 if(channels == mCodecCtx->channels || channels+2 == mCodecCtx->channels)
1076 /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
1077 * is 4 channels for 3D buffers.
1079 mFrameSize *= 4;
1080 mFormat = alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32");
1083 if(!mFormat || mFormat == -1)
1085 mDstChanLayout = AV_CH_LAYOUT_STEREO;
1086 mFrameSize *= 2;
1087 mFormat = FormatStereo32F;
1090 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
1092 mDstSampleFmt = AV_SAMPLE_FMT_U8;
1093 mFrameSize = 1;
1094 if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
1096 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1)
1098 mDstChanLayout = mCodecCtx->channel_layout;
1099 mFrameSize *= 8;
1100 mFormat = alGetEnumValue("AL_FORMAT_71CHN8");
1102 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1
1103 || mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK)
1105 mDstChanLayout = mCodecCtx->channel_layout;
1106 mFrameSize *= 6;
1107 mFormat = alGetEnumValue("AL_FORMAT_51CHN8");
1109 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_QUAD)
1111 mDstChanLayout = mCodecCtx->channel_layout;
1112 mFrameSize *= 4;
1113 mFormat = alGetEnumValue("AL_FORMAT_QUAD8");
1116 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
1118 mDstChanLayout = mCodecCtx->channel_layout;
1119 mFrameSize *= 1;
1120 mFormat = AL_FORMAT_MONO8;
1122 if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4
1123 && alIsExtensionPresent("AL_EXT_BFORMAT"))
1125 auto order = static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1;
1126 int channels{(order+1) * (order+1)};
1127 if(channels == mCodecCtx->channels || channels+2 == mCodecCtx->channels)
1129 mFrameSize *= 4;
1130 mFormat = alGetEnumValue("AL_FORMAT_BFORMAT3D_8");
1133 if(!mFormat || mFormat == -1)
1135 mDstChanLayout = AV_CH_LAYOUT_STEREO;
1136 mFrameSize *= 2;
1137 mFormat = FormatStereo8;
1140 if(!mFormat || mFormat == -1)
1142 mDstSampleFmt = AV_SAMPLE_FMT_S16;
1143 mFrameSize = 2;
1144 if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
1146 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1)
1148 mDstChanLayout = mCodecCtx->channel_layout;
1149 mFrameSize *= 8;
1150 mFormat = alGetEnumValue("AL_FORMAT_71CHN16");
1152 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1
1153 || mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK)
1155 mDstChanLayout = mCodecCtx->channel_layout;
1156 mFrameSize *= 6;
1157 mFormat = alGetEnumValue("AL_FORMAT_51CHN16");
1159 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_QUAD)
1161 mDstChanLayout = mCodecCtx->channel_layout;
1162 mFrameSize *= 4;
1163 mFormat = alGetEnumValue("AL_FORMAT_QUAD16");
1166 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
1168 mDstChanLayout = mCodecCtx->channel_layout;
1169 mFrameSize *= 1;
1170 mFormat = AL_FORMAT_MONO16;
1172 if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4
1173 && alIsExtensionPresent("AL_EXT_BFORMAT"))
1175 auto order = static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1;
1176 int channels{(order+1) * (order+1)};
1177 if(channels == mCodecCtx->channels || channels+2 == mCodecCtx->channels)
1179 mFrameSize *= 4;
1180 mFormat = alGetEnumValue("AL_FORMAT_BFORMAT3D_16");
1183 if(!mFormat || mFormat == -1)
1185 mDstChanLayout = AV_CH_LAYOUT_STEREO;
1186 mFrameSize *= 2;
1187 mFormat = FormatStereo16;
1191 mSamples = nullptr;
1192 mSamplesMax = 0;
1193 mSamplesPos = 0;
1194 mSamplesLen = 0;
1196 mDecodedFrame.reset(av_frame_alloc());
1197 if(!mDecodedFrame)
1199 std::cerr<< "Failed to allocate audio frame" <<std::endl;
1200 return 0;
1203 if(!mDstChanLayout)
1205 /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
1206 * we have to drop any extra channels.
1208 mSwresCtx.reset(swr_alloc_set_opts(nullptr,
1209 (1_i64<<4)-1, mDstSampleFmt, mCodecCtx->sample_rate,
1210 (1_i64<<mCodecCtx->channels)-1, mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
1211 0, nullptr));
1213 /* Note that ffmpeg/libavcodec has no method to check the ambisonic
1214 * channel order and normalization, so we can only assume AmbiX as the
1215 * defacto-standard. This is not true for .amb files, which use FuMa.
1217 std::vector<double> mtx(64*64, 0.0);
1218 ambi_layout = AL_ACN_SOFT;
1219 ambi_scale = AL_SN3D_SOFT;
1220 if(has_bfmt_ex)
1222 /* An identity matrix that doesn't remix any channels. */
1223 std::cout<< "Found AL_SOFT_bformat_ex" <<std::endl;
1224 mtx[0 + 0*64] = 1.0;
1225 mtx[1 + 1*64] = 1.0;
1226 mtx[2 + 2*64] = 1.0;
1227 mtx[3 + 3*64] = 1.0;
1229 else
1231 std::cout<< "Found AL_EXT_BFORMAT" <<std::endl;
1232 /* Without AL_SOFT_bformat_ex, OpenAL only supports FuMa channel
1233 * ordering and normalization, so a custom matrix is needed to
1234 * scale and reorder the source from AmbiX.
1236 mtx[0 + 0*64] = std::sqrt(0.5);
1237 mtx[3 + 1*64] = 1.0;
1238 mtx[1 + 2*64] = 1.0;
1239 mtx[2 + 3*64] = 1.0;
1241 swr_set_matrix(mSwresCtx.get(), mtx.data(), 64);
1243 else
1244 mSwresCtx.reset(swr_alloc_set_opts(nullptr,
1245 static_cast<int64_t>(mDstChanLayout), mDstSampleFmt, mCodecCtx->sample_rate,
1246 mCodecCtx->channel_layout ? static_cast<int64_t>(mCodecCtx->channel_layout)
1247 : av_get_default_channel_layout(mCodecCtx->channels),
1248 mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
1249 0, nullptr));
1250 if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
1252 std::cerr<< "Failed to initialize audio converter" <<std::endl;
1253 return 0;
1256 alGenBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());
1257 alGenSources(1, &mSource);
1259 if(DirectOutMode)
1260 alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, DirectOutMode);
1261 if(EnableWideStereo)
1263 const float angles[2]{static_cast<float>(M_PI / 3.0), static_cast<float>(-M_PI / 3.0)};
1264 alSourcefv(mSource, AL_STEREO_ANGLES, angles);
1266 if(has_bfmt_ex)
1268 for(ALuint bufid : mBuffers)
1270 alBufferi(bufid, AL_AMBISONIC_LAYOUT_SOFT, ambi_layout);
1271 alBufferi(bufid, AL_AMBISONIC_SCALING_SOFT, ambi_scale);
1274 #ifdef AL_SOFT_UHJ
1275 if(EnableSuperStereo)
1276 alSourcei(mSource, AL_STEREO_MODE_SOFT, AL_SUPER_STEREO_SOFT);
1277 #endif
1279 if(alGetError() != AL_NO_ERROR)
1280 return 0;
1282 #ifdef AL_SOFT_callback_buffer
1283 bool callback_ok{false};
1284 if(alBufferCallbackSOFT)
1286 alBufferCallbackSOFT(mBuffers[0], mFormat, mCodecCtx->sample_rate, bufferCallbackC, this,
1288 alSourcei(mSource, AL_BUFFER, static_cast<ALint>(mBuffers[0]));
1289 if(alGetError() != AL_NO_ERROR)
1291 fprintf(stderr, "Failed to set buffer callback\n");
1292 alSourcei(mSource, AL_BUFFER, 0);
1294 else
1296 mBufferDataSize = static_cast<size_t>(duration_cast<seconds>(mCodecCtx->sample_rate *
1297 AudioBufferTotalTime).count()) * mFrameSize;
1298 mBufferData = std::make_unique<uint8_t[]>(mBufferDataSize);
1299 std::fill_n(mBufferData.get(), mBufferDataSize, uint8_t{});
1301 mReadPos.store(0, std::memory_order_relaxed);
1302 mWritePos.store(mBufferDataSize/mFrameSize/2*mFrameSize, std::memory_order_relaxed);
1304 ALCint refresh{};
1305 alcGetIntegerv(alcGetContextsDevice(alcGetCurrentContext()), ALC_REFRESH, 1, &refresh);
1306 sleep_time = milliseconds{seconds{1}} / refresh;
1307 callback_ok = true;
1310 if(!callback_ok)
1311 #endif
1312 buffer_len = static_cast<int>(duration_cast<seconds>(mCodecCtx->sample_rate *
1313 AudioBufferTime).count() * mFrameSize);
1314 if(buffer_len > 0)
1315 samples = std::make_unique<uint8_t[]>(static_cast<ALuint>(buffer_len));
1317 /* Prefill the codec buffer. */
1318 auto packet_sender = [this]()
1320 while(1)
1322 const int ret{mQueue.sendPacket(mCodecCtx.get())};
1323 if(ret == AVErrorEOF) break;
1326 auto sender = std::async(std::launch::async, packet_sender);
1328 srclock.lock();
1329 if(alcGetInteger64vSOFT)
1331 int64_t devtime{};
1332 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT,
1333 1, &devtime);
1334 mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
1337 mSamplesLen = decodeFrame();
1338 if(mSamplesLen > 0)
1340 mSamplesPos = std::min(mSamplesLen, getSync());
1342 auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
1343 mDeviceStartTime -= skip;
1344 mCurrentPts += skip;
1347 while(1)
1349 ALenum state;
1350 if(mBufferDataSize > 0)
1352 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
1353 /* If mQuit is set, don't actually quit until we can't get more
1354 * audio, indicating we've reached the flush packet and the packet
1355 * sender will also quit.
1357 * If mQuit is not set, don't quit even if there's no more audio,
1358 * so what's buffered has a chance to play to the real end.
1360 if(!readAudio(getSync()) && mMovie.mQuit.load(std::memory_order_relaxed))
1361 goto finish;
1363 else
1365 ALint processed, queued;
1367 /* First remove any processed buffers. */
1368 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
1369 while(processed > 0)
1371 ALuint bid;
1372 alSourceUnqueueBuffers(mSource, 1, &bid);
1373 --processed;
1376 /* Refill the buffer queue. */
1377 int sync_skip{getSync()};
1378 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
1379 while(static_cast<ALuint>(queued) < mBuffers.size())
1381 /* Read the next chunk of data, filling the buffer, and queue
1382 * it on the source.
1384 const bool got_audio{readAudio(samples.get(), static_cast<ALuint>(buffer_len),
1385 sync_skip)};
1386 if(!got_audio)
1388 if(mMovie.mQuit.load(std::memory_order_relaxed))
1389 goto finish;
1390 break;
1393 const ALuint bufid{mBuffers[mBufferIdx]};
1394 mBufferIdx = static_cast<ALuint>((mBufferIdx+1) % mBuffers.size());
1396 alBufferData(bufid, mFormat, samples.get(), buffer_len, mCodecCtx->sample_rate);
1397 alSourceQueueBuffers(mSource, 1, &bufid);
1398 ++queued;
1401 /* Check that the source is playing. */
1402 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
1403 if(state == AL_STOPPED)
1405 /* AL_STOPPED means there was an underrun. Clear the buffer
1406 * queue since this likely means we're late, and rewind the
1407 * source to get it back into an AL_INITIAL state.
1409 alSourceRewind(mSource);
1410 alSourcei(mSource, AL_BUFFER, 0);
1411 if(alcGetInteger64vSOFT)
1413 /* Also update the device start time with the current
1414 * device clock, so the decoder knows we're running behind.
1416 int64_t devtime{};
1417 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
1418 ALC_DEVICE_CLOCK_SOFT, 1, &devtime);
1419 mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
1421 continue;
1425 /* (re)start the source if needed, and wait for a buffer to finish */
1426 if(state != AL_PLAYING && state != AL_PAUSED)
1428 if(!startPlayback())
1429 break;
1431 if(ALenum err{alGetError()})
1432 std::cerr<< "Got AL error: 0x"<<std::hex<<err<<std::dec
1433 << " ("<<alGetString(err)<<")" <<std::endl;
1435 mSrcCond.wait_for(srclock, sleep_time);
1437 finish:
1439 alSourceRewind(mSource);
1440 alSourcei(mSource, AL_BUFFER, 0);
1441 srclock.unlock();
1443 return 0;
1447 nanoseconds VideoState::getClock()
1449 /* NOTE: This returns incorrect times while not playing. */
1450 std::lock_guard<std::mutex> _{mDispPtsMutex};
1451 if(mDisplayPtsTime == microseconds::min())
1452 return nanoseconds::zero();
1453 auto delta = get_avtime() - mDisplayPtsTime;
1454 return mDisplayPts + delta;
1457 /* Called by VideoState::updateVideo to display the next video frame. */
1458 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer, AVFrame *frame)
1460 if(!mImage)
1461 return;
1463 double aspect_ratio;
1464 int win_w, win_h;
1465 int w, h, x, y;
1467 int frame_width{frame->width - static_cast<int>(frame->crop_left + frame->crop_right)};
1468 int frame_height{frame->height - static_cast<int>(frame->crop_top + frame->crop_bottom)};
1469 if(frame->sample_aspect_ratio.num == 0)
1470 aspect_ratio = 0.0;
1471 else
1473 aspect_ratio = av_q2d(frame->sample_aspect_ratio) * frame_width /
1474 frame_height;
1476 if(aspect_ratio <= 0.0)
1477 aspect_ratio = static_cast<double>(frame_width) / frame_height;
1479 SDL_GetWindowSize(screen, &win_w, &win_h);
1480 h = win_h;
1481 w = (static_cast<int>(std::rint(h * aspect_ratio)) + 3) & ~3;
1482 if(w > win_w)
1484 w = win_w;
1485 h = (static_cast<int>(std::rint(w / aspect_ratio)) + 3) & ~3;
1487 x = (win_w - w) / 2;
1488 y = (win_h - h) / 2;
1490 SDL_Rect src_rect{ static_cast<int>(frame->crop_left), static_cast<int>(frame->crop_top),
1491 frame_width, frame_height };
1492 SDL_Rect dst_rect{ x, y, w, h };
1493 SDL_RenderCopy(renderer, mImage, &src_rect, &dst_rect);
1494 SDL_RenderPresent(renderer);
/* Called regularly on the main thread where the SDL_Renderer was created. It
 * handles updating the textures of decoded frames and displaying the latest
 * frame.
 */
void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw)
{
    size_t read_idx{mPictQRead.load(std::memory_order_relaxed)};
    Picture *vp{&mPictQ[read_idx]};

    /* Advance through the picture queue to the newest frame whose PTS has
     * been reached, consuming (skipping) any frames we're already late for. */
    auto clocktime = mMovie.getMasterClock();
    bool updated{false};
    while(1)
    {
        size_t next_idx{(read_idx+1)%mPictQ.size()};
        if(next_idx == mPictQWrite.load(std::memory_order_acquire))
            break; /* Queue empty; nothing newer to show. */
        Picture *nextvp{&mPictQ[next_idx]};
        if(clocktime < nextvp->mPts && !mMovie.mQuit.load(std::memory_order_relaxed))
        {
            /* For the first update, ensure the first frame gets shown. */
            if(!mFirstUpdate || updated)
                break;
        }

        vp = nextvp;
        updated = true;
        read_idx = next_idx;
    }
    if(mMovie.mQuit.load(std::memory_order_relaxed))
    {
        /* Quitting: publish the read position and wake the decoder thread so
         * it can finish, flagging the final update if the stream ended. */
        if(mEOS)
            mFinalUpdate = true;
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();
        return;
    }

    AVFrame *frame{vp->mFrame.get()};
    if(updated)
    {
        /* Release the consumed queue slots and wake the decoder thread, which
         * may be waiting for space. */
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();

        /* allocate or resize the buffer! */
        bool fmt_updated{false};
        if(!mImage || mWidth != frame->width || mHeight != frame->height)
        {
            fmt_updated = true;
            if(mImage)
                SDL_DestroyTexture(mImage);
            mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                frame->width, frame->height);
            if(!mImage)
                std::cerr<< "Failed to create YV12 texture!" <<std::endl;
            mWidth = frame->width;
            mHeight = frame->height;
        }

        int frame_width{frame->width - static_cast<int>(frame->crop_left + frame->crop_right)};
        int frame_height{frame->height - static_cast<int>(frame->crop_top + frame->crop_bottom)};
        if(mFirstUpdate && frame_width > 0 && frame_height > 0)
        {
            /* For the first update, set the window size to the video size. */
            mFirstUpdate = false;

            if(frame->sample_aspect_ratio.den != 0)
            {
                /* Scale the window dimensions by the sample aspect ratio so
                 * non-square pixels display with the correct proportions. */
                double aspect_ratio = av_q2d(frame->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    frame_width = static_cast<int>(frame_width*aspect_ratio + 0.5);
                else if(aspect_ratio > 0.0)
                    frame_height = static_cast<int>(frame_height/aspect_ratio + 0.5);
            }
            SDL_SetWindowSize(screen, frame_width, frame_height);
        }

        if(mImage)
        {
            void *pixels{nullptr};
            int pitch{0};

            if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
                /* Frame is already YUV420P; upload the planes directly. */
                SDL_UpdateYUVTexture(mImage, nullptr,
                    frame->data[0], frame->linesize[0],
                    frame->data[1], frame->linesize[1],
                    frame->data[2], frame->linesize[2]
                );
            else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0)
                std::cerr<< "Failed to lock texture" <<std::endl;
            else
            {
                // Convert the image into YUV format that SDL uses
                int w{frame->width};
                int h{frame->height};
                if(!mSwscaleCtx || fmt_updated)
                {
                    mSwscaleCtx.reset(sws_getContext(
                        w, h, mCodecCtx->pix_fmt,
                        w, h, AV_PIX_FMT_YUV420P, 0,
                        nullptr, nullptr, nullptr
                    ));
                }

                /* point pict at the queue */
                uint8_t *pict_data[3];
                pict_data[0] = static_cast<uint8_t*>(pixels);
                pict_data[1] = pict_data[0] + w*h;
                pict_data[2] = pict_data[1] + w*h/4;

                int pict_linesize[3];
                pict_linesize[0] = pitch;
                pict_linesize[1] = pitch / 2;
                pict_linesize[2] = pitch / 2;

                sws_scale(mSwscaleCtx.get(), reinterpret_cast<uint8_t**>(frame->data), frame->linesize,
                    0, h, pict_data, pict_linesize);
                SDL_UnlockTexture(mImage);
            }

            redraw = true;
        }
    }

    if(redraw)
    {
        /* Show the picture! */
        display(screen, renderer, frame);
    }

    if(updated)
    {
        /* Record when this frame's PTS was put on screen, for getClock. */
        auto disp_time = get_avtime();

        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPts = vp->mPts;
        mDisplayPtsTime = disp_time;
    }
    if(mEOS.load(std::memory_order_acquire))
    {
        /* Stream ended and the queue has drained; signal the decoder thread
         * that the final frame has been handled. */
        if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire))
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>{mPictQMutex}.unlock();
            mPictQCond.notify_one();
        }
    }
}
/* Video decoder thread entry point. Receives decoded frames from the codec,
 * assigns each a presentation timestamp, and places them in the picture queue
 * for the main thread to upload and display. Returns 0 when the stream ends. */
int VideoState::handler()
{
    /* Pre-allocate an AVFrame for every picture queue slot. */
    std::for_each(mPictQ.begin(), mPictQ.end(),
        [](Picture &pict) -> void
        { pict.mFrame = AVFramePtr{av_frame_alloc()}; });

    /* Prefill the codec buffer. */
    auto packet_sender = [this]()
    {
        while(1)
        {
            const int ret{mQueue.sendPacket(mCodecCtx.get())};
            if(ret == AVErrorEOF) break;
        }
    };
    auto sender = std::async(std::launch::async, packet_sender);

    {
        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPtsTime = get_avtime();
    }

    auto current_pts = nanoseconds::zero();
    while(1)
    {
        size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)};
        Picture *vp{&mPictQ[write_idx]};

        /* Retrieve video frame. */
        AVFrame *decoded_frame{vp->mFrame.get()};
        while(int ret{mQueue.receiveFrame(mCodecCtx.get(), decoded_frame)})
        {
            if(ret == AVErrorEOF) goto finish;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
        }

        /* Get the PTS for this frame. Reuse the previous estimate when the
         * frame carries no usable timestamp. */
        if(decoded_frame->best_effort_timestamp != AVNoPtsValue)
            current_pts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
                static_cast<double>(decoded_frame->best_effort_timestamp)});
        vp->mPts = current_pts;

        /* Update the video clock to the next expected PTS. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
        current_pts += duration_cast<nanoseconds>(seconds_d64{frame_delay});

        /* Put the frame in the queue to be loaded into a texture and displayed
         * by the rendering thread.
         */
        write_idx = (write_idx+1)%mPictQ.size();
        mPictQWrite.store(write_idx, std::memory_order_release);

        if(write_idx == mPictQRead.load(std::memory_order_acquire))
        {
            /* Wait until we have space for a new pic */
            std::unique_lock<std::mutex> lock{mPictQMutex};
            while(write_idx == mPictQRead.load(std::memory_order_acquire))
                mPictQCond.wait(lock);
        }
    }
finish:
    mEOS = true;

    /* Wait for the main thread to process the last queued frames. */
    std::unique_lock<std::mutex> lock{mPictQMutex};
    while(!mFinalUpdate) mPictQCond.wait(lock);

    return 0;
}
1718 int MovieState::decode_interrupt_cb(void *ctx)
1720 return static_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
1723 bool MovieState::prepare()
1725 AVIOContext *avioctx{nullptr};
1726 AVIOInterruptCB intcb{decode_interrupt_cb, this};
1727 if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
1729 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1730 return false;
1732 mIOContext.reset(avioctx);
1734 /* Open movie file. If avformat_open_input fails it will automatically free
1735 * this context, so don't set it onto a smart pointer yet.
1737 AVFormatContext *fmtctx{avformat_alloc_context()};
1738 fmtctx->pb = mIOContext.get();
1739 fmtctx->interrupt_callback = intcb;
1740 if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
1742 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1743 return false;
1745 mFormatCtx.reset(fmtctx);
1747 /* Retrieve stream information */
1748 if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
1750 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1751 return false;
1754 /* Dump information about file onto standard error */
1755 av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
1757 mParseThread = std::thread{std::mem_fn(&MovieState::parse_handler), this};
1759 std::unique_lock<std::mutex> slock{mStartupMutex};
1760 while(!mStartupDone) mStartupCond.wait(slock);
1761 return true;
1764 void MovieState::setTitle(SDL_Window *window)
1766 auto pos1 = mFilename.rfind('/');
1767 auto pos2 = mFilename.rfind('\\');
1768 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1769 (pos2 == std::string::npos) ? pos1 :
1770 std::max(pos1, pos2)) + 1;
1771 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1774 nanoseconds MovieState::getClock()
1776 if(mClockBase == microseconds::min())
1777 return nanoseconds::zero();
1778 return get_avtime() - mClockBase;
1781 nanoseconds MovieState::getMasterClock()
1783 if(mAVSyncType == SyncMaster::Video && mVideo.mStream)
1784 return mVideo.getClock();
1785 if(mAVSyncType == SyncMaster::Audio && mAudio.mStream)
1786 return mAudio.getClock();
1787 return getClock();
1790 nanoseconds MovieState::getDuration()
1791 { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
1793 int MovieState::streamComponentOpen(unsigned int stream_index)
1795 if(stream_index >= mFormatCtx->nb_streams)
1796 return -1;
1798 /* Get a pointer to the codec context for the stream, and open the
1799 * associated codec.
1801 AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)};
1802 if(!avctx) return -1;
1804 if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
1805 return -1;
1807 const AVCodec *codec{avcodec_find_decoder(avctx->codec_id)};
1808 if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
1810 std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1811 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1812 return -1;
1815 /* Initialize and start the media type handler */
1816 switch(avctx->codec_type)
1818 case AVMEDIA_TYPE_AUDIO:
1819 mAudio.mStream = mFormatCtx->streams[stream_index];
1820 mAudio.mCodecCtx = std::move(avctx);
1821 break;
1823 case AVMEDIA_TYPE_VIDEO:
1824 mVideo.mStream = mFormatCtx->streams[stream_index];
1825 mVideo.mCodecCtx = std::move(avctx);
1826 break;
1828 default:
1829 return -1;
1832 return static_cast<int>(stream_index);
/* Parse thread entry point. Selects the first audio/video streams, launches
 * their handler threads, then reads packets from the file and dispatches them
 * to the per-stream queues until EOF or quit. Joins the handler threads and
 * posts FF_MOVIE_DONE_EVENT when playback is finished. */
int MovieState::parse_handler()
{
    auto &audio_queue = mAudio.mQueue;
    auto &video_queue = mVideo.mQueue;

    int video_index{-1};
    int audio_index{-1};

    /* Find the first video and audio streams */
    for(unsigned int i{0u};i < mFormatCtx->nb_streams;i++)
    {
        auto codecpar = mFormatCtx->streams[i]->codecpar;
        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0)
            video_index = streamComponentOpen(i);
        else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = streamComponentOpen(i);
    }

    /* Let MovieState::prepare return now that stream setup is done. */
    {
        std::unique_lock<std::mutex> slock{mStartupMutex};
        mStartupDone = true;
    }
    mStartupCond.notify_all();

    if(video_index < 0 && audio_index < 0)
    {
        std::cerr<< mFilename<<": could not open codecs" <<std::endl;
        mQuit = true;
    }

    /* Set the base time 750ms ahead of the current av time. */
    mClockBase = get_avtime() + milliseconds{750};

    if(audio_index >= 0)
        mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio};
    if(video_index >= 0)
        mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo};

    /* Main packet reading/dispatching loop */
    AVPacketPtr packet{av_packet_alloc()};
    while(!mQuit.load(std::memory_order_relaxed))
    {
        if(av_read_frame(mFormatCtx.get(), packet.get()) < 0)
            break;

        /* Copy the packet into the queue it's meant for. Retry while the
         * queue is full, unless a quit was requested. */
        if(packet->stream_index == video_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(packet.get()))
                std::this_thread::sleep_for(milliseconds{100});
        }
        else if(packet->stream_index == audio_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(packet.get()))
                std::this_thread::sleep_for(milliseconds{100});
        }

        av_packet_unref(packet.get());
    }
    /* Finish the queues so the receivers know nothing more is coming. */
    video_queue.setFinished();
    audio_queue.setFinished();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    /* Make sure the video state reports its final update before signaling the
     * main thread that this movie is done. */
    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock{mVideo.mPictQMutex};
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}
1917 void MovieState::stop()
1919 mQuit = true;
1920 mAudio.mQueue.flush();
1921 mVideo.mQueue.flush();
// Helper class+method to print a time with human-readable h/m/s formatting.
struct PrettyTime {
    std::chrono::seconds mTime;
};
std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using std::chrono::duration_cast;
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;
    using seconds = std::chrono::seconds;

    auto t = rhs.mTime;
    if(t < seconds::zero())
    {
        /* Print a leading sign and format the magnitude. */
        os << '-';
        t = -t;
    }

    // Only handle up to hour formatting
    if(t >= hours{1})
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
            << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    os << std::setw(2) << (t.count() % 60) << 's' << std::setw(0) << std::setfill(' ');
    return os;
}
1952 } // namespace
/* Program entry point: parses options, sets up SDL video and the OpenAL audio
 * device, then plays each file argument in turn until all are done or the
 * user quits. */
int main(int argc, char *argv[])
{
    std::unique_ptr<MovieState> movState;

    if(argc < 2)
    {
        std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
        return 1;
    }
    /* Register all formats and codecs */
#if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100))
    av_register_all();
#endif
    /* Initialize networking protocols */
    avformat_network_init();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS))
    {
        std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
        return 1;
    }

    /* Make a window to put our video */
    SDL_Window *screen{SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE)};
    if(!screen)
    {
        std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
        return 1;
    }
    /* Make a renderer to handle the texture image surface and rendering. */
    Uint32 render_flags{SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC};
    SDL_Renderer *renderer{SDL_CreateRenderer(screen, -1, render_flags)};
    if(renderer)
    {
        SDL_RendererInfo rinf{};
        bool ok{false};

        /* Make sure the renderer supports IYUV textures. If not, fallback to a
         * software renderer. */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)
        {
            for(Uint32 i{0u};!ok && i < rinf.num_texture_formats;i++)
                ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
        }
        if(!ok)
        {
            std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
            SDL_DestroyRenderer(renderer);
            renderer = nullptr;
        }
    }
    if(!renderer)
    {
        /* Fall back to a software renderer when the accelerated one is
         * missing or lacks IYUV support. */
        render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
        renderer = SDL_CreateRenderer(screen, -1, render_flags);
    }
    if(!renderer)
    {
        std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
        return 1;
    }
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderFillRect(renderer, nullptr);
    SDL_RenderPresent(renderer);

    /* Open an audio device */
    ++argv; --argc;
    if(InitAL(&argv, &argc))
    {
        std::cerr<< "Failed to set up audio device" <<std::endl;
        return 1;
    }

    /* Load optional OpenAL extension function pointers if available. */
    {
        auto device = alcGetContextsDevice(alcGetCurrentContext());
        if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
        {
            std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
            alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
                alcGetProcAddress(device, "alcGetInteger64vSOFT")
            );
        }
    }

    if(alIsExtensionPresent("AL_SOFT_source_latency"))
    {
        std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
        alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
            alGetProcAddress("alGetSourcei64vSOFT")
        );
    }
    if(alIsExtensionPresent("AL_SOFT_events"))
    {
        std::cout<< "Found AL_SOFT_events" <<std::endl;
        alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
            alGetProcAddress("alEventControlSOFT"));
        alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
            alGetProcAddress("alEventCallbackSOFT"));
    }
#ifdef AL_SOFT_callback_buffer
    if(alIsExtensionPresent("AL_SOFTX_callback_buffer"))
    {
        std::cout<< "Found AL_SOFT_callback_buffer" <<std::endl;
        alBufferCallbackSOFT = reinterpret_cast<LPALBUFFERCALLBACKSOFT>(
            alGetProcAddress("alBufferCallbackSOFT"));
    }
#endif

    /* Consume leading option arguments; the remaining arguments are files. */
    int fileidx{0};
    for(;fileidx < argc;++fileidx)
    {
        if(strcmp(argv[fileidx], "-direct") == 0)
        {
            if(alIsExtensionPresent("AL_SOFT_direct_channels_remix"))
            {
                std::cout<< "Found AL_SOFT_direct_channels_remix" <<std::endl;
                DirectOutMode = AL_REMIX_UNMATCHED_SOFT;
            }
            else if(alIsExtensionPresent("AL_SOFT_direct_channels"))
            {
                std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
                DirectOutMode = AL_DROP_UNMATCHED_SOFT;
            }
            else
                std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
        }
        else if(strcmp(argv[fileidx], "-wide") == 0)
        {
            if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
                std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
            else
            {
                std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
                EnableWideStereo = true;
            }
        }
        else if(strcmp(argv[fileidx], "-uhj") == 0)
        {
#ifdef AL_SOFT_UHJ
            if(!alIsExtensionPresent("AL_SOFTX_UHJ"))
                std::cerr<< "AL_SOFT_UHJ not supported for UHJ decoding" <<std::endl;
            else
            {
                std::cout<< "Found AL_SOFT_UHJ" <<std::endl;
                FormatStereo8 = AL_FORMAT_UHJ2CHN8_SOFT;
                FormatStereo16 = AL_FORMAT_UHJ2CHN16_SOFT;
                FormatStereo32F = AL_FORMAT_UHJ2CHN_FLOAT32_SOFT;
            }
#else
            std::cerr<< "AL_SOFT_UHJ not supported for UHJ decoding" <<std::endl;
#endif
        }
        else if(strcmp(argv[fileidx], "-superstereo") == 0)
        {
#ifdef AL_SOFT_UHJ
            if(!alIsExtensionPresent("AL_SOFTX_UHJ"))
                std::cerr<< "AL_SOFT_UHJ not supported for Super Stereo decoding" <<std::endl;
            else
            {
                std::cout<< "Found AL_SOFT_UHJ (Super Stereo)" <<std::endl;
                EnableSuperStereo = true;
            }
#else
            std::cerr<< "AL_SOFT_UHJ not supported for Super Stereo decoding" <<std::endl;
#endif
        }
        else if(strcmp(argv[fileidx], "-novideo") == 0)
            DisableVideo = true;
        else
            break;
    }

    /* Open the first file that successfully prepares. */
    while(fileidx < argc && !movState)
    {
        movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
        if(!movState->prepare()) movState = nullptr;
    }
    if(!movState)
    {
        std::cerr<< "Could not start a video" <<std::endl;
        return 1;
    }
    movState->setTitle(screen);

    /* Default to going to the next movie at the end of one. */
    enum class EomAction {
        Next, Quit
    } eom_action{EomAction::Next};
    seconds last_time{seconds::min()};
    while(1)
    {
        /* SDL_WaitEventTimeout is broken, just force a 10ms sleep. */
        std::this_thread::sleep_for(milliseconds{10});

        /* Update the position readout whenever the displayed second changes. */
        auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
        if(cur_time != last_time)
        {
            auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
            std::cout<< "    \r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
            last_time = cur_time;
        }

        bool force_redraw{false};
        SDL_Event event{};
        while(SDL_PollEvent(&event) != 0)
        {
            switch(event.type)
            {
            case SDL_KEYDOWN:
                switch(event.key.keysym.sym)
                {
                case SDLK_ESCAPE:
                    /* Escape: stop playback and quit after cleanup. */
                    movState->stop();
                    eom_action = EomAction::Quit;
                    break;

                case SDLK_n:
                    /* 'n': stop playback and move on to the next file. */
                    movState->stop();
                    eom_action = EomAction::Next;
                    break;

                default:
                    break;
                }
                break;

            case SDL_WINDOWEVENT:
                switch(event.window.event)
                {
                case SDL_WINDOWEVENT_RESIZED:
                    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                    SDL_RenderFillRect(renderer, nullptr);
                    force_redraw = true;
                    break;

                case SDL_WINDOWEVENT_EXPOSED:
                    force_redraw = true;
                    break;

                default:
                    break;
                }
                break;

            case SDL_QUIT:
                movState->stop();
                eom_action = EomAction::Quit;
                break;

            case FF_MOVIE_DONE_EVENT:
                /* Current movie finished; try the next file unless quitting. */
                std::cout<<'\n';
                last_time = seconds::min();
                if(eom_action != EomAction::Quit)
                {
                    movState = nullptr;
                    while(fileidx < argc && !movState)
                    {
                        movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
                        if(!movState->prepare()) movState = nullptr;
                    }
                    if(movState)
                    {
                        movState->setTitle(screen);
                        break;
                    }
                }

                /* Nothing more to play. Shut everything down and quit. */
                movState = nullptr;

                CloseAL();

                SDL_DestroyRenderer(renderer);
                renderer = nullptr;
                SDL_DestroyWindow(screen);
                screen = nullptr;

                SDL_Quit();
                exit(0);

            default:
                break;
            }
        }

        movState->mVideo.updateVideo(screen, renderer, force_redraw);
    }

    /* Unreachable in normal operation; the loop exits via exit(0) above. */
    std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
    return 1;
}