Include fmt 11.0.2
[openal-soft.git] / examples / alffplay.cpp
blob4f89ff588ed5d96a6aaa267409e98cf2f9b2bc2e
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
 * Requires C++17.
5 */
7 #include <algorithm>
8 #include <array>
9 #include <atomic>
10 #include <cassert>
11 #include <cerrno>
12 #include <chrono>
13 #include <cmath>
14 #include <condition_variable>
15 #include <cstdint>
16 #include <cstdio>
17 #include <cstdlib>
18 #include <cstring>
19 #include <deque>
20 #include <functional>
21 #include <future>
22 #include <iomanip>
23 #include <iostream>
24 #include <memory>
25 #include <mutex>
26 #include <ratio>
27 #include <string>
28 #include <string_view>
29 #include <thread>
30 #include <utility>
31 #include <vector>
33 #ifdef __GNUC__
34 _Pragma("GCC diagnostic push")
35 _Pragma("GCC diagnostic ignored \"-Wconversion\"")
36 _Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
37 #endif
38 extern "C" {
39 #include "libavcodec/avcodec.h"
40 #include "libavformat/avformat.h"
41 #include "libavformat/avio.h"
42 #include "libavformat/version.h"
43 #include "libavutil/avutil.h"
44 #include "libavutil/error.h"
45 #include "libavutil/frame.h"
46 #include "libavutil/mem.h"
47 #include "libavutil/pixfmt.h"
48 #include "libavutil/rational.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/time.h"
51 #include "libavutil/version.h"
52 #include "libavutil/channel_layout.h"
53 #include "libswscale/swscale.h"
54 #include "libswresample/swresample.h"
56 constexpr auto AVNoPtsValue = AV_NOPTS_VALUE;
57 constexpr auto AVErrorEOF = AVERROR_EOF;
59 struct SwsContext;
62 #define SDL_MAIN_HANDLED
63 #include "SDL.h"
64 #ifdef __GNUC__
65 _Pragma("GCC diagnostic pop")
66 #endif
68 #include "AL/alc.h"
69 #include "AL/al.h"
70 #include "AL/alext.h"
72 #include "almalloc.h"
73 #include "alnumbers.h"
74 #include "alnumeric.h"
75 #include "alspan.h"
76 #include "common/alhelpers.h"
namespace {

/* 32.32 fixed-point duration, matching the format OpenAL's 64-bit sample
 * offset queries report (see the `<< 32` fallback conversions below). */
using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1_i64<<32)>>;
using nanoseconds = std::chrono::nanoseconds;
using microseconds = std::chrono::microseconds;
using milliseconds = std::chrono::milliseconds;
using seconds = std::chrono::seconds;
using seconds_d64 = std::chrono::duration<double>;
using std::chrono::duration_cast;

const std::string AppName{"alffplay"};

/* Playback options; defaults here, presumably overridden from the command
 * line in main() (not visible in this chunk). */
ALenum DirectOutMode{AL_FALSE};
bool EnableWideStereo{false};
bool EnableUhj{false};
bool EnableSuperStereo{false};
bool DisableVideo{false};

/* OpenAL extension function pointers, loaded at runtime; each is checked for
 * null before use since the extension may be unavailable. */
LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
LPALEVENTCONTROLSOFT alEventControlSOFT;
LPALEVENTCALLBACKSOFT alEventCallbackSOFT;

LPALBUFFERCALLBACKSOFT alBufferCallbackSOFT;

/* If the clock difference exceeds this, give up on sync correction. */
const seconds AVNoSyncThreshold{10};

#define VIDEO_PICTURE_QUEUE_SIZE 24

const seconds_d64 AudioSyncThreshold{0.03};
const milliseconds AudioSampleCorrectionMax{50};
/* Averaging filter coefficient for audio sync. */
#define AUDIO_DIFF_AVG_NB 20
const double AudioAvgFilterCoeff{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB)};
/* Per-buffer size, in time */
constexpr milliseconds AudioBufferTime{20};
/* Buffer total size, in time (should be divisible by the buffer time) */
constexpr milliseconds AudioBufferTotalTime{800};
constexpr auto AudioBufferCount = AudioBufferTotalTime / AudioBufferTime;

/* Custom SDL event pushed when the movie finishes. */
enum {
    FF_MOVIE_DONE_EVENT = SDL_USEREVENT
};

/* Which stream drives the master clock the others sync to. */
enum class SyncMaster {
    Audio,
    Video,
    External,

    Default = Audio
};


/* Current ffmpeg wall-clock time, as a chrono duration. */
inline microseconds get_avtime()
{ return microseconds{av_gettime()}; }
/* Define unique_ptrs to auto-cleanup associated ffmpeg objects. Each deleter
 * calls the matching ffmpeg free/close function, so the smart pointers give
 * RAII semantics to the C API handles. */
struct AVIOContextDeleter {
    void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
};
using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;

struct AVFormatCtxDeleter {
    void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
};
using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;

struct AVCodecCtxDeleter {
    void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
};
using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;

struct AVPacketDeleter {
    void operator()(AVPacket *pkt) { av_packet_free(&pkt); }
};
using AVPacketPtr = std::unique_ptr<AVPacket,AVPacketDeleter>;

struct AVFrameDeleter {
    void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
};
using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;

struct SwrContextDeleter {
    void operator()(SwrContext *ptr) { swr_free(&ptr); }
};
using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;

struct SwsContextDeleter {
    void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
};
using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
171 struct ChannelLayout : public AVChannelLayout {
172 ChannelLayout() : AVChannelLayout{} { }
173 ChannelLayout(const ChannelLayout &rhs) : AVChannelLayout{}
174 { av_channel_layout_copy(this, &rhs); }
175 ~ChannelLayout() { av_channel_layout_uninit(this); }
177 auto operator=(const ChannelLayout &rhs) -> ChannelLayout&
178 { av_channel_layout_copy(this, &rhs); return *this; }
/* A thread-safe packet queue feeding a codec context. Producers add demuxed
 * packets with put(); the decode thread pushes them into the codec with
 * sendPacket() and pulls decoded frames with receiveFrame(). Two mutexes are
 * used: mPacketMutex guards the packet deque, mFrameMutex serializes the
 * send/receive codec calls, with condition variables to pace the two sides.
 */
class DataQueue {
    const size_t mSizeLimit; /* soft cap on queued packet bytes */
    std::mutex mPacketMutex, mFrameMutex;
    std::condition_variable mPacketCond;
    std::condition_variable mInFrameCond, mOutFrameCond;

    std::deque<AVPacketPtr> mPackets;
    size_t mTotalSize{0};    /* total bytes currently queued */
    bool mFinished{false};   /* set once no more packets will arrive */

    /* Pops the next queued packet, blocking until one is available or the
     * queue is marked finished. Returns null on a finished, empty queue
     * (used as the codec flush signal). */
    AVPacketPtr getPacket()
    {
        std::unique_lock<std::mutex> plock{mPacketMutex};
        while(mPackets.empty() && !mFinished)
            mPacketCond.wait(plock);
        if(mPackets.empty())
            return nullptr;

        auto ret = std::move(mPackets.front());
        mPackets.pop_front();
        mTotalSize -= static_cast<unsigned int>(ret->size);
        return ret;
    }

public:
    DataQueue(size_t size_limit) : mSizeLimit{size_limit} { }

    /* Feeds the next packet (or the null flush packet) to the codec,
     * retrying while the codec is full (EAGAIN) until receiveFrame() drains
     * it. Returns 0 on success, AVErrorEOF after a successful flush, or a
     * negative error code. */
    int sendPacket(AVCodecContext *codecctx)
    {
        AVPacketPtr packet{getPacket()};

        int ret{};
        {
            std::unique_lock<std::mutex> flock{mFrameMutex};
            while((ret=avcodec_send_packet(codecctx, packet.get())) == AVERROR(EAGAIN))
                mInFrameCond.wait_for(flock, milliseconds{50});
        }
        mOutFrameCond.notify_one();

        if(!packet)
        {
            /* A null packet is the flush request; success means EOF. */
            if(!ret) return AVErrorEOF;
            std::cerr<< "Failed to send flush packet: "<<ret <<std::endl;
            return ret;
        }

        if(ret < 0)
            std::cerr<< "Failed to send packet: "<<ret <<std::endl;
        return ret;
    }

    /* Pulls the next decoded frame from the codec, waiting while the codec
     * needs more input (EAGAIN). Returns the avcodec_receive_frame result. */
    int receiveFrame(AVCodecContext *codecctx, AVFrame *frame)
    {
        int ret{};
        {
            std::unique_lock<std::mutex> flock{mFrameMutex};
            while((ret=avcodec_receive_frame(codecctx, frame)) == AVERROR(EAGAIN))
                mOutFrameCond.wait_for(flock, milliseconds{50});
        }
        mInFrameCond.notify_one();
        return ret;
    }

    /* Marks the queue as complete; waiters are woken so they can drain. */
    void setFinished()
    {
        {
            std::lock_guard<std::mutex> packetlock{mPacketMutex};
            mFinished = true;
        }
        mPacketCond.notify_one();
    }

    /* Drops all queued packets and marks the queue complete. */
    void flush()
    {
        {
            std::lock_guard<std::mutex> packetlock{mPacketMutex};
            mFinished = true;

            mPackets.clear();
            mTotalSize = 0;
        }
        mPacketCond.notify_one();
    }

    /* Queues a reference to the given packet. Returns false only when the
     * queue is full or finished (caller should retry later/stop).
     * NOTE(review): on av_packet_ref() failure this still returns true —
     * the packet is silently dropped but treated as consumed; verify that
     * is the intended behavior. */
    bool put(const AVPacket *pkt)
    {
        {
            std::lock_guard<std::mutex> packet_lock{mPacketMutex};
            if(mTotalSize >= mSizeLimit || mFinished)
                return false;

            mPackets.push_back(AVPacketPtr{av_packet_alloc()});
            if(av_packet_ref(mPackets.back().get(), pkt) != 0)
            {
                mPackets.pop_back();
                return true;
            }

            mTotalSize += static_cast<unsigned int>(mPackets.back()->size);
        }
        mPacketCond.notify_one();
        return true;
    }
};
287 struct MovieState;
/* State for decoding and playing the audio stream through OpenAL. Owned by
 * MovieState; handler() runs on its own thread. */
struct AudioState {
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    /* Demuxed audio packets waiting to be decoded (2MB soft cap). */
    DataQueue mQueue{2_uz*1024_uz*1024_uz};

    /* Used for clock difference average computation */
    seconds_d64 mClockDiffAvg{0};

    /* Time of the next sample to be buffered */
    nanoseconds mCurrentPts{0};

    /* Device clock time that the stream started at. */
    nanoseconds mDeviceStartTime{nanoseconds::min()};

    /* Decompressed sample frame, and swresample context for conversion */
    AVFramePtr mDecodedFrame;
    SwrContextPtr mSwresCtx;

    /* Conversion format, for what gets fed to OpenAL */
    uint64_t mDstChanLayout{0}; /* an AV_CH_LAYOUT_* mask */
    AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};

    /* Storage of converted samples */
    std::array<uint8_t*,1> mSamples{}; /* single interleaved plane, av_samples_alloc'd */
    al::span<uint8_t> mSamplesSpan{};
    int mSamplesLen{0}; /* In samples */
    int mSamplesPos{0}; /* negative = samples to duplicate for sync correction */
    int mSamplesMax{0}; /* allocated capacity, in samples */

    /* Ring buffer used with the AL_SOFT_callback_buffer path; mReadPos is
     * advanced by the AL mixer callback, mWritePos by the decode thread. */
    std::vector<uint8_t> mBufferData;
    std::atomic<size_t> mReadPos{0};
    std::atomic<size_t> mWritePos{0};

    /* OpenAL format */
    ALenum mFormat{AL_NONE};
    ALuint mFrameSize{0}; /* bytes per sample frame */

    std::mutex mSrcMutex;
    std::condition_variable mSrcCond;
    std::atomic_flag mConnected{}; /* cleared by the AL disconnect event */
    ALuint mSource{0};
    std::array<ALuint,AudioBufferCount> mBuffers{};
    ALuint mBufferIdx{0};

    AudioState(MovieState &movie) : mMovie(movie)
    { mConnected.test_and_set(std::memory_order_relaxed); }
    ~AudioState()
    {
        if(mSource)
            alDeleteSources(1, &mSource);
        if(mBuffers[0])
            alDeleteBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());

        av_freep(static_cast<void*>(mSamples.data()));
    }

    /* C-style trampolines forwarding OpenAL callbacks to the members. */
    static void AL_APIENTRY eventCallbackC(ALenum eventType, ALuint object, ALuint param,
        ALsizei length, const ALchar *message, void *userParam) noexcept
    { static_cast<AudioState*>(userParam)->eventCallback(eventType, object, param, length, message); }
    void eventCallback(ALenum eventType, ALuint object, ALuint param, ALsizei length,
        const ALchar *message) noexcept;

    static ALsizei AL_APIENTRY bufferCallbackC(void *userptr, void *data, ALsizei size) noexcept
    { return static_cast<AudioState*>(userptr)->bufferCallback(data, size); }
    ALsizei bufferCallback(void *data, ALsizei size) noexcept;

    nanoseconds getClockNoLock();
    nanoseconds getClock()
    {
        std::lock_guard<std::mutex> lock{mSrcMutex};
        return getClockNoLock();
    }

    bool startPlayback();

    int getSync();
    int decodeFrame();
    bool readAudio(al::span<uint8_t> samples, unsigned int length, int &sample_skip);
    bool readAudio(int sample_skip);

    int handler();
};
/* State for decoding and presenting the video stream through SDL. Owned by
 * MovieState; handler() runs on its own thread. */
struct VideoState {
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    /* Demuxed video packets waiting to be decoded (14MB soft cap). */
    DataQueue mQueue{14_uz*1024_uz*1024_uz};

    /* The pts of the currently displayed frame, and the time (av_gettime) it
     * was last updated - used to have running video pts
     */
    nanoseconds mDisplayPts{0};
    microseconds mDisplayPtsTime{microseconds::min()};
    std::mutex mDispPtsMutex;

    /* Swscale context for format conversion */
    SwsContextPtr mSwscaleCtx;

    /* A decoded frame together with its presentation timestamp. */
    struct Picture {
        AVFramePtr mFrame{};
        nanoseconds mPts{nanoseconds::min()};
    };
    /* Ring of decoded pictures awaiting display. */
    std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
    std::atomic<size_t> mPictQRead{0u}, mPictQWrite{1u};
    std::mutex mPictQMutex;
    std::condition_variable mPictQCond;

    SDL_Texture *mImage{nullptr};
    int mWidth{0}, mHeight{0}; /* Full texture size */
    bool mFirstUpdate{true};

    std::atomic<bool> mEOS{false};
    std::atomic<bool> mFinalUpdate{false};

    VideoState(MovieState &movie) : mMovie(movie) { }
    ~VideoState()
    {
        if(mImage)
            SDL_DestroyTexture(mImage);
        mImage = nullptr;
    }

    nanoseconds getClock();

    void display(SDL_Window *screen, SDL_Renderer *renderer, AVFrame *frame) const;
    void updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw);
    int handler();
};
/* Top-level playback state: owns the demuxer, the audio and video stream
 * states, and the threads that drive them. */
struct MovieState {
    AVIOContextPtr mIOContext;
    AVFormatCtxPtr mFormatCtx;

    SyncMaster mAVSyncType{SyncMaster::Default};

    /* External-clock base time (av_gettime-based). */
    microseconds mClockBase{microseconds::min()};

    std::atomic<bool> mQuit{false};

    AudioState mAudio;
    VideoState mVideo;

    std::mutex mStartupMutex;
    std::condition_variable mStartupCond;
    bool mStartupDone{false};

    std::thread mParseThread;
    std::thread mAudioThread;
    std::thread mVideoThread;

    std::string mFilename;

    MovieState(std::string_view fname) : mAudio{*this}, mVideo{*this}, mFilename{fname}
    { }
    ~MovieState()
    {
        stop();
        if(mParseThread.joinable())
            mParseThread.join();
    }

    static int decode_interrupt_cb(void *ctx);
    bool prepare();
    void setTitle(SDL_Window *window) const;
    void stop();

    [[nodiscard]] nanoseconds getClock() const;
    [[nodiscard]] nanoseconds getMasterClock();
    [[nodiscard]] nanoseconds getDuration() const;

    bool streamComponentOpen(AVStream *stream);
    int parse_handler();
};
/* Returns the audio clock — the timestamp of the sample currently being
 * heard — using the best clock source available: the device clock (with
 * latency), the callback ring buffer position, or the source queue state.
 * Caller must hold mSrcMutex (see getClock()). */
nanoseconds AudioState::getClockNoLock()
{
    // The audio clock is the timestamp of the sample currently being heard.
    if(alcGetInteger64vSOFT)
    {
        // If device start time = min, we aren't playing yet.
        if(mDeviceStartTime == nanoseconds::min())
            return nanoseconds::zero();

        // Get the current device clock time and latency.
        auto device = alcGetContextsDevice(alcGetCurrentContext());
        std::array<ALCint64SOFT,2> devtimes{};
        alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes.data());
        auto latency = nanoseconds{devtimes[1]};
        auto device_time = nanoseconds{devtimes[0]};

        // The clock is simply the current device time relative to the recorded
        // start time. We can also subtract the latency to get a more accurate
        // position of where the audio device actually is in the output stream.
        return device_time - mDeviceStartTime - latency;
    }

    if(!mBufferData.empty())
    {
        if(mDeviceStartTime == nanoseconds::min())
            return nanoseconds::zero();

        /* With a callback buffer and no device clock, mDeviceStartTime is
         * actually the timestamp of the first sample frame played. The audio
         * clock, then, is that plus the current source offset.
         */
        std::array<ALint64SOFT,2> offset{};
        if(alGetSourcei64vSOFT)
            alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset.data());
        else
        {
            /* No 64-bit query: use the 32-bit sample offset, shifted up to
             * the 32.32 fixed-point format, with zero latency. */
            ALint ioffset;
            alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
            offset[0] = ALint64SOFT{ioffset} << 32;
        }
        /* NOTE: The source state must be checked last, in case an underrun
         * occurs and the source stops between getting the state and retrieving
         * the offset+latency.
         */
        ALint status;
        alGetSourcei(mSource, AL_SOURCE_STATE, &status);

        nanoseconds pts{};
        if(status == AL_PLAYING || status == AL_PAUSED)
            pts = mDeviceStartTime - nanoseconds{offset[1]} +
                duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
        else
        {
            /* If the source is stopped, the pts of the next sample to be heard
             * is the pts of the next sample to be buffered, minus the amount
             * already in the buffer ready to play.
             */
            const size_t woffset{mWritePos.load(std::memory_order_acquire)};
            const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
            const size_t readable{((woffset>=roffset) ? woffset : (mBufferData.size()+woffset)) -
                roffset};

            pts = mCurrentPts - nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
        }

        return pts;
    }

    /* The source-based clock is based on 4 components:
     * 1 - The timestamp of the next sample to buffer (mCurrentPts)
     * 2 - The length of the source's buffer queue
     *     (AudioBufferTime*AL_BUFFERS_QUEUED)
     * 3 - The offset OpenAL is currently at in the source (the first value
     *     from AL_SAMPLE_OFFSET_LATENCY_SOFT)
     * 4 - The latency between OpenAL and the DAC (the second value from
     *     AL_SAMPLE_OFFSET_LATENCY_SOFT)
     *
     * Subtracting the length of the source queue from the next sample's
     * timestamp gives the timestamp of the sample at the start of the source
     * queue. Adding the source offset to that results in the timestamp for the
     * sample at OpenAL's current position, and subtracting the source latency
     * from that gives the timestamp of the sample currently at the DAC.
     */
    nanoseconds pts{mCurrentPts};
    if(mSource)
    {
        std::array<ALint64SOFT,2> offset{};
        if(alGetSourcei64vSOFT)
            alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset.data());
        else
        {
            ALint ioffset;
            alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
            offset[0] = ALint64SOFT{ioffset} << 32;
        }
        ALint queued, status;
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        alGetSourcei(mSource, AL_SOURCE_STATE, &status);

        /* If the source is AL_STOPPED, then there was an underrun and all
         * buffers are processed, so ignore the source queue. The audio thread
         * will put the source into an AL_INITIAL state and clear the queue
         * when it starts recovery.
         */
        if(status != AL_STOPPED)
        {
            pts -= AudioBufferTime*queued;
            pts += duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
        }
        /* Don't offset by the latency if the source isn't playing. */
        if(status == AL_PLAYING)
            pts -= nanoseconds{offset[1]};
    }

    return std::max(pts, nanoseconds::zero());
}
/* Starts the source playing and records the device time the stream
 * effectively started at (mDeviceStartTime), so getClockNoLock() can report
 * a correct clock. Returns false when there's nothing buffered to play yet. */
bool AudioState::startPlayback()
{
    const size_t woffset{mWritePos.load(std::memory_order_acquire)};
    const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
    const size_t readable{((woffset >= roffset) ? woffset : (mBufferData.size()+woffset)) -
        roffset};

    if(!mBufferData.empty())
    {
        if(readable == 0)
            return false;
        /* Without a device clock, mDeviceStartTime doubles as the pts of the
         * first sample played: next pts minus what's buffered and ready. */
        if(!alcGetInteger64vSOFT)
            mDeviceStartTime = mCurrentPts -
                nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
    }
    else
    {
        ALint queued{};
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        if(queued == 0) return false;
    }

    alSourcePlay(mSource);
    if(alcGetInteger64vSOFT)
    {
        /* Subtract the total buffer queue time from the current pts to get the
         * pts of the start of the queue.
         */
        std::array<int64_t,2> srctimes{};
        alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes.data());
        auto device_time = nanoseconds{srctimes[1]};
        auto src_offset = duration_cast<nanoseconds>(fixed32{srctimes[0]}) /
            mCodecCtx->sample_rate;

        /* The mixer may have ticked and incremented the device time and sample
         * offset, so subtract the source offset from the device time to get
         * the device time the source started at. Also subtract startpts to get
         * the device time the stream would have started at to reach where it
         * is now.
         */
        if(!mBufferData.empty())
        {
            nanoseconds startpts{mCurrentPts -
                nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate};
            mDeviceStartTime = device_time - src_offset - startpts;
        }
        else
        {
            nanoseconds startpts{mCurrentPts - AudioBufferTotalTime};
            mDeviceStartTime = device_time - src_offset - startpts;
        }
    }
    return true;
}
642 int AudioState::getSync()
644 if(mMovie.mAVSyncType == SyncMaster::Audio)
645 return 0;
647 auto ref_clock = mMovie.getMasterClock();
648 auto diff = ref_clock - getClockNoLock();
650 if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
652 /* Difference is TOO big; reset accumulated average */
653 mClockDiffAvg = seconds_d64::zero();
654 return 0;
657 /* Accumulate the diffs */
658 mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
659 auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
660 if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
661 return 0;
663 /* Constrain the per-update difference to avoid exceedingly large skips */
664 diff = std::min<nanoseconds>(diff, AudioSampleCorrectionMax);
665 return static_cast<int>(duration_cast<seconds>(diff*mCodecCtx->sample_rate).count());
/* Decodes the next non-empty audio frame, converts it to the OpenAL format
 * via swresample into mSamples, and updates mCurrentPts from the frame's
 * timestamp. Returns the number of sample frames converted, or 0 at EOF. */
int AudioState::decodeFrame()
{
    do {
        while(int ret{mQueue.receiveFrame(mCodecCtx.get(), mDecodedFrame.get())})
        {
            if(ret == AVErrorEOF) return 0;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
        }
    } while(mDecodedFrame->nb_samples <= 0);

    /* If provided, update w/ pts */
    if(mDecodedFrame->best_effort_timestamp != AVNoPtsValue)
        mCurrentPts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
            static_cast<double>(mDecodedFrame->best_effort_timestamp)});

    /* Grow the conversion buffer if this frame is larger than any so far. */
    if(mDecodedFrame->nb_samples > mSamplesMax)
    {
        av_freep(static_cast<void*>(mSamples.data()));
        av_samples_alloc(mSamples.data(), nullptr, mCodecCtx->ch_layout.nb_channels,
            mDecodedFrame->nb_samples, mDstSampleFmt, 0);
        mSamplesMax = mDecodedFrame->nb_samples;
        mSamplesSpan = {mSamples[0], static_cast<size_t>(mSamplesMax)*mFrameSize};
    }
    /* Copy to a local to mark const. Don't know why this can't be implicit. */
    using data_t = decltype(decltype(mDecodedFrame)::element_type::data);
    std::array<const uint8_t*,std::extent_v<data_t>> cdata{};
    std::copy(std::begin(mDecodedFrame->data), std::end(mDecodedFrame->data), cdata.begin());
    /* Return the amount of sample frames converted */
    const int data_size{swr_convert(mSwresCtx.get(), mSamples.data(), mDecodedFrame->nb_samples,
        cdata.data(), mDecodedFrame->nb_samples)};

    av_frame_unref(mDecodedFrame.get());
    return data_size;
}
703 /* Duplicates the sample at in to out, count times. The frame size is a
704 * multiple of the template type size.
706 template<typename T>
707 void sample_dup(al::span<uint8_t> out, al::span<const uint8_t> in, size_t count, size_t frame_size)
709 auto sample = al::span{reinterpret_cast<const T*>(in.data()), in.size()/sizeof(T)};
710 auto dst = al::span{reinterpret_cast<T*>(out.data()), out.size()/sizeof(T)};
712 /* NOTE: frame_size is a multiple of sizeof(T). */
713 const size_t type_mult{frame_size / sizeof(T)};
714 if(type_mult == 1)
715 std::fill_n(dst.begin(), count, sample.front());
716 else for(size_t i{0};i < count;++i)
718 for(size_t j{0};j < type_mult;++j)
719 dst[i*type_mult + j] = sample[j];
723 void sample_dup(al::span<uint8_t> out, al::span<const uint8_t> in, size_t count, size_t frame_size)
725 if((frame_size&7) == 0)
726 sample_dup<uint64_t>(out, in, count, frame_size);
727 else if((frame_size&3) == 0)
728 sample_dup<uint32_t>(out, in, count, frame_size);
729 else if((frame_size&1) == 0)
730 sample_dup<uint16_t>(out, in, count, frame_size);
731 else
732 sample_dup<uint8_t>(out, in, count, frame_size);
/* Fills `samples` with up to `length` bytes of converted audio, decoding
 * more frames as needed. A negative mSamplesPos means sample_skip correction
 * is duplicating the first sample; a positive one indexes into the decoded
 * buffer. Pads with silence if the stream ran short. Returns false only when
 * no audio at all was produced. */
bool AudioState::readAudio(al::span<uint8_t> samples, unsigned int length, int &sample_skip)
{
    unsigned int audio_size{0};

    /* Read the next chunk of data, refill the buffer, and queue it
     * on the source */
    length /= mFrameSize;
    while(mSamplesLen > 0 && audio_size < length)
    {
        unsigned int rem{length - audio_size};
        if(mSamplesPos >= 0)
        {
            /* Copy from the decoded sample buffer. */
            const auto len = static_cast<unsigned int>(mSamplesLen - mSamplesPos);
            if(rem > len) rem = len;
            const size_t boffset{static_cast<ALuint>(mSamplesPos) * size_t{mFrameSize}};
            std::copy_n(mSamplesSpan.cbegin()+ptrdiff_t(boffset), rem*size_t{mFrameSize},
                samples.begin());
        }
        else
        {
            rem = std::min(rem, static_cast<unsigned int>(-mSamplesPos));

            /* Add samples by copying the first sample */
            sample_dup(samples, mSamplesSpan, rem, mFrameSize);
        }

        mSamplesPos += static_cast<int>(rem);
        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
        samples = samples.subspan(rem*size_t{mFrameSize});
        audio_size += rem;

        /* Refill the source buffer when it's exhausted, applying any
         * requested skip from the sync correction. */
        while(mSamplesPos >= mSamplesLen)
        {
            mSamplesLen = decodeFrame();
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            if(mSamplesLen <= 0) break;

            sample_skip -= mSamplesPos;

            // Adjust the device start time and current pts by the amount we're
            // skipping/duplicating, so that the clock remains correct for the
            // current stream position.
            auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
            mDeviceStartTime -= skip;
            mCurrentPts += skip;
        }
    }
    if(audio_size <= 0)
        return false;

    if(audio_size < length)
    {
        /* Pad the remainder with silence (0x80 is the U8 zero point). */
        const unsigned int rem{length - audio_size};
        std::fill_n(samples.begin(), rem*mFrameSize,
            (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
    }
    return true;
}
/* Ring-buffer variant of readAudio, used with the AL_SOFT_callback_buffer
 * path: writes converted (or duplicated) samples into mBufferData up to one
 * byte short of the read position, publishing progress via mWritePos.
 * Returns false when the decoder hits EOF/error. */
bool AudioState::readAudio(int sample_skip)
{
    size_t woffset{mWritePos.load(std::memory_order_acquire)};
    const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
    while(mSamplesLen > 0)
    {
        /* Contiguous writable sample frames, leaving one byte free so a full
         * buffer isn't mistaken for an empty one (woffset == roffset). */
        const size_t nsamples{((roffset > woffset) ? roffset-woffset-1
            : (roffset == 0) ? (mBufferData.size()-woffset-1)
            : (mBufferData.size()-woffset)) / mFrameSize};
        if(!nsamples) break;

        if(mSamplesPos < 0)
        {
            /* Negative position: duplicate the first decoded sample to slow
             * the stream down for sync correction. */
            const size_t rem{std::min<size_t>(nsamples, static_cast<ALuint>(-mSamplesPos))};

            sample_dup(al::span{mBufferData}.subspan(woffset), mSamplesSpan, rem, mFrameSize);
            woffset += rem * mFrameSize;
            if(woffset == mBufferData.size()) woffset = 0;
            mWritePos.store(woffset, std::memory_order_release);

            mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
            mSamplesPos += static_cast<int>(rem);
            continue;
        }

        const size_t rem{std::min<size_t>(nsamples, static_cast<ALuint>(mSamplesLen-mSamplesPos))};
        const size_t boffset{static_cast<ALuint>(mSamplesPos) * size_t{mFrameSize}};
        const size_t nbytes{rem * mFrameSize};

        std::copy_n(mSamplesSpan.cbegin()+ptrdiff_t(boffset), nbytes,
            mBufferData.begin()+ptrdiff_t(woffset));
        woffset += nbytes;
        if(woffset == mBufferData.size()) woffset = 0;
        mWritePos.store(woffset, std::memory_order_release);

        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
        mSamplesPos += static_cast<int>(rem);

        /* Decode more once the current buffer is consumed, applying any
         * requested skip and adjusting the clocks to match. */
        while(mSamplesPos >= mSamplesLen)
        {
            mSamplesLen = decodeFrame();
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            if(mSamplesLen <= 0) return false;

            sample_skip -= mSamplesPos;

            auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
            mDeviceStartTime -= skip;
            mCurrentPts += skip;
        }
    }

    return true;
}
/* OpenAL event handler (AL_SOFT_events): wakes the audio thread on buffer
 * completion, logs other events, and flags mConnected clear on disconnect so
 * the audio thread can attempt recovery. Runs on OpenAL's event thread. */
void AL_APIENTRY AudioState::eventCallback(ALenum eventType, ALuint object, ALuint param,
    ALsizei length, const ALchar *message) noexcept
{
    if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
    {
        /* Temporarily lock the source mutex to ensure it's not between
         * checking the processed count and going to sleep.
         */
        std::unique_lock<std::mutex>{mSrcMutex}.unlock();
        mSrcCond.notify_one();
        return;
    }

    std::cout<< "\n---- AL Event on AudioState "<<this<<" ----\nEvent: ";
    switch(eventType)
    {
    case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
    case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
    case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
    default:
        /* Unknown event type: print it as hex, then restore stream flags. */
        std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<std::dec<<
            std::setw(0)<<std::setfill(' '); break;
    }
    std::cout<< "\n"
        "Object ID: "<<object<<"\n"
        "Parameter: "<<param<<"\n"
        "Message: "<<std::string{message, static_cast<ALuint>(length)}<<"\n----"<<
        std::endl;

    if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
    {
        /* Clear the connected flag under the source lock, then wake the
         * audio thread so it notices the disconnect. */
        {
            std::lock_guard<std::mutex> lock{mSrcMutex};
            mConnected.clear(std::memory_order_release);
        }
        mSrcCond.notify_one();
    }
}
890 ALsizei AudioState::bufferCallback(void *data, ALsizei size) noexcept
892 auto dst = al::span{static_cast<ALbyte*>(data), static_cast<ALuint>(size)};
893 ALsizei got{0};
895 size_t roffset{mReadPos.load(std::memory_order_acquire)};
896 while(!dst.empty())
898 const size_t woffset{mWritePos.load(std::memory_order_relaxed)};
899 if(woffset == roffset) break;
901 size_t todo{((woffset < roffset) ? mBufferData.size() : woffset) - roffset};
902 todo = std::min(todo, dst.size());
904 std::copy_n(mBufferData.cbegin()+ptrdiff_t(roffset), todo, dst.begin());
905 dst = dst.subspan(todo);
906 got += static_cast<ALsizei>(todo);
908 roffset += todo;
909 if(roffset == mBufferData.size())
910 roffset = 0;
912 mReadPos.store(roffset, std::memory_order_release);
914 return got;
917 int AudioState::handler()
919 std::unique_lock<std::mutex> srclock{mSrcMutex, std::defer_lock};
920 milliseconds sleep_time{AudioBufferTime / 3};
922 struct EventControlManager {
923 const std::array<ALenum,3> evt_types{{
924 AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
925 AL_EVENT_TYPE_DISCONNECTED_SOFT}};
927 EventControlManager(milliseconds &sleep_time)
929 if(alEventControlSOFT)
931 alEventControlSOFT(static_cast<ALsizei>(evt_types.size()), evt_types.data(),
932 AL_TRUE);
933 alEventCallbackSOFT(&AudioState::eventCallbackC, this);
934 sleep_time = AudioBufferTotalTime;
937 ~EventControlManager()
939 if(alEventControlSOFT)
941 alEventControlSOFT(static_cast<ALsizei>(evt_types.size()), evt_types.data(),
942 AL_FALSE);
943 alEventCallbackSOFT(nullptr, nullptr);
947 EventControlManager event_controller{sleep_time};
949 std::vector<uint8_t> samples;
950 ALsizei buffer_len{0};
952 /* Find a suitable format for OpenAL. */
953 const auto layoutmask = mCodecCtx->ch_layout.u.mask; /* NOLINT(*-union-access) */
954 mDstChanLayout = 0;
955 mFormat = AL_NONE;
956 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP
957 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_DBL
958 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_DBLP
959 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S32
960 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S32P
961 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S64
962 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S64P)
963 && alIsExtensionPresent("AL_EXT_FLOAT32"))
965 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
966 mFrameSize = 4;
967 if(mCodecCtx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE)
969 if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
971 if(layoutmask == AV_CH_LAYOUT_7POINT1)
973 mDstChanLayout = layoutmask;
974 mFrameSize *= 8;
975 mFormat = alGetEnumValue("AL_FORMAT_71CHN32");
977 if(layoutmask == AV_CH_LAYOUT_5POINT1 || layoutmask == AV_CH_LAYOUT_5POINT1_BACK)
979 mDstChanLayout = layoutmask;
980 mFrameSize *= 6;
981 mFormat = alGetEnumValue("AL_FORMAT_51CHN32");
983 if(layoutmask == AV_CH_LAYOUT_QUAD)
985 mDstChanLayout = layoutmask;
986 mFrameSize *= 4;
987 mFormat = EnableUhj ? AL_FORMAT_UHJ4CHN_FLOAT32_SOFT
988 : alGetEnumValue("AL_FORMAT_QUAD32");
991 if(layoutmask == AV_CH_LAYOUT_MONO)
993 mDstChanLayout = layoutmask;
994 mFrameSize *= 1;
995 mFormat = AL_FORMAT_MONO_FLOAT32;
998 else if(mCodecCtx->ch_layout.order == AV_CHANNEL_ORDER_AMBISONIC
999 && alIsExtensionPresent("AL_EXT_BFORMAT"))
1001 /* Calculate what should be the ambisonic order from the number of
1002 * channels, and confirm that's the number of channels. Opus allows
1003 * an optional non-diegetic stereo stream with the B-Format stream,
1004 * which we can ignore, so check for that too.
1006 auto order = static_cast<int>(std::sqrt(mCodecCtx->ch_layout.nb_channels)) - 1;
1007 int channels{(order+1) * (order+1)};
1008 if(channels == mCodecCtx->ch_layout.nb_channels
1009 || channels+2 == mCodecCtx->ch_layout.nb_channels)
1011 /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
1012 * is 4 channels for 3D buffers.
1014 mFrameSize *= 4;
1015 mFormat = alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32");
1018 if(!mFormat || mFormat == -1)
1020 mDstChanLayout = AV_CH_LAYOUT_STEREO;
1021 mFrameSize *= 2;
1022 mFormat = EnableUhj ? AL_FORMAT_UHJ2CHN_FLOAT32_SOFT : AL_FORMAT_STEREO_FLOAT32;
1025 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
1027 mDstSampleFmt = AV_SAMPLE_FMT_U8;
1028 mFrameSize = 1;
1029 if(mCodecCtx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE)
1031 if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
1033 if(layoutmask == AV_CH_LAYOUT_7POINT1)
1035 mDstChanLayout = layoutmask;
1036 mFrameSize *= 8;
1037 mFormat = alGetEnumValue("AL_FORMAT_71CHN8");
1039 if(layoutmask == AV_CH_LAYOUT_5POINT1 || layoutmask == AV_CH_LAYOUT_5POINT1_BACK)
1041 mDstChanLayout = layoutmask;
1042 mFrameSize *= 6;
1043 mFormat = alGetEnumValue("AL_FORMAT_51CHN8");
1045 if(layoutmask == AV_CH_LAYOUT_QUAD)
1047 mDstChanLayout = layoutmask;
1048 mFrameSize *= 4;
1049 mFormat = EnableUhj ? AL_FORMAT_UHJ4CHN8_SOFT
1050 : alGetEnumValue("AL_FORMAT_QUAD8");
1053 if(layoutmask == AV_CH_LAYOUT_MONO)
1055 mDstChanLayout = layoutmask;
1056 mFrameSize *= 1;
1057 mFormat = AL_FORMAT_MONO8;
1060 else if(mCodecCtx->ch_layout.order == AV_CHANNEL_ORDER_AMBISONIC
1061 && alIsExtensionPresent("AL_EXT_BFORMAT"))
1063 auto order = static_cast<int>(std::sqrt(mCodecCtx->ch_layout.nb_channels)) - 1;
1064 int channels{(order+1) * (order+1)};
1065 if(channels == mCodecCtx->ch_layout.nb_channels
1066 || channels+2 == mCodecCtx->ch_layout.nb_channels)
1068 mFrameSize *= 4;
1069 mFormat = alGetEnumValue("AL_FORMAT_BFORMAT3D_8");
1072 if(!mFormat || mFormat == -1)
1074 mDstChanLayout = AV_CH_LAYOUT_STEREO;
1075 mFrameSize *= 2;
1076 mFormat = EnableUhj ? AL_FORMAT_UHJ2CHN8_SOFT : AL_FORMAT_STEREO8;
1079 if(!mFormat || mFormat == -1)
1081 mDstSampleFmt = AV_SAMPLE_FMT_S16;
1082 mFrameSize = 2;
1083 if(mCodecCtx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE)
1085 if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
1087 if(layoutmask == AV_CH_LAYOUT_7POINT1)
1089 mDstChanLayout = layoutmask;
1090 mFrameSize *= 8;
1091 mFormat = alGetEnumValue("AL_FORMAT_71CHN16");
1093 if(layoutmask == AV_CH_LAYOUT_5POINT1 || layoutmask == AV_CH_LAYOUT_5POINT1_BACK)
1095 mDstChanLayout = layoutmask;
1096 mFrameSize *= 6;
1097 mFormat = alGetEnumValue("AL_FORMAT_51CHN16");
1099 if(layoutmask == AV_CH_LAYOUT_QUAD)
1101 mDstChanLayout = layoutmask;
1102 mFrameSize *= 4;
1103 mFormat = EnableUhj ? AL_FORMAT_UHJ4CHN16_SOFT
1104 : alGetEnumValue("AL_FORMAT_QUAD16");
1107 if(layoutmask == AV_CH_LAYOUT_MONO)
1109 mDstChanLayout = layoutmask;
1110 mFrameSize *= 1;
1111 mFormat = AL_FORMAT_MONO16;
1114 else if(mCodecCtx->ch_layout.order == AV_CHANNEL_ORDER_AMBISONIC
1115 && alIsExtensionPresent("AL_EXT_BFORMAT"))
1117 auto order = static_cast<int>(std::sqrt(mCodecCtx->ch_layout.nb_channels)) - 1;
1118 int channels{(order+1) * (order+1)};
1119 if(channels == mCodecCtx->ch_layout.nb_channels
1120 || channels+2 == mCodecCtx->ch_layout.nb_channels)
1122 mFrameSize *= 4;
1123 mFormat = alGetEnumValue("AL_FORMAT_BFORMAT3D_16");
1126 if(!mFormat || mFormat == -1)
1128 mDstChanLayout = AV_CH_LAYOUT_STEREO;
1129 mFrameSize *= 2;
1130 mFormat = EnableUhj ? AL_FORMAT_UHJ2CHN16_SOFT : AL_FORMAT_STEREO16;
1134 mSamples.fill(nullptr);
1135 mSamplesSpan = {};
1136 mSamplesMax = 0;
1137 mSamplesPos = 0;
1138 mSamplesLen = 0;
1140 mDecodedFrame.reset(av_frame_alloc());
1141 if(!mDecodedFrame)
1143 std::cerr<< "Failed to allocate audio frame" <<std::endl;
1144 return 0;
1147 /* Note that ffmpeg assumes AmbiX (ACN layout, SN3D normalization). */
1148 const bool has_bfmt_ex{alIsExtensionPresent("AL_SOFT_bformat_ex") != AL_FALSE};
1149 const ALenum ambi_layout{AL_ACN_SOFT};
1150 const ALenum ambi_scale{AL_SN3D_SOFT};
1152 if(!mDstChanLayout)
1154 /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
1155 * we have to drop any extra channels.
1157 ChannelLayout layout{};
1158 av_channel_layout_from_string(&layout, "ambisonic 1");
1160 int err{swr_alloc_set_opts2(al::out_ptr(mSwresCtx), &layout, mDstSampleFmt,
1161 mCodecCtx->sample_rate, &mCodecCtx->ch_layout, mCodecCtx->sample_fmt,
1162 mCodecCtx->sample_rate, 0, nullptr)};
1163 if(err != 0)
1165 std::array<char,AV_ERROR_MAX_STRING_SIZE> errstr{};
1166 std::cerr<< "Failed to allocate SwrContext: "
1167 <<av_make_error_string(errstr.data(), AV_ERROR_MAX_STRING_SIZE, err) <<std::endl;
1168 return 0;
1171 if(has_bfmt_ex)
1172 std::cout<< "Found AL_SOFT_bformat_ex" <<std::endl;
1173 else
1175 std::cout<< "Found AL_EXT_BFORMAT" <<std::endl;
1176 /* Without AL_SOFT_bformat_ex, OpenAL only supports FuMa channel
1177 * ordering and normalization, so a custom matrix is needed to
1178 * scale and reorder the source from AmbiX.
1180 std::vector<double> mtx(64_uz*64_uz, 0.0);
1181 mtx[0 + 0*64] = std::sqrt(0.5);
1182 mtx[3 + 1*64] = 1.0;
1183 mtx[1 + 2*64] = 1.0;
1184 mtx[2 + 3*64] = 1.0;
1185 swr_set_matrix(mSwresCtx.get(), mtx.data(), 64);
1188 else
1190 ChannelLayout layout{};
1191 av_channel_layout_from_mask(&layout, mDstChanLayout);
1193 int err{swr_alloc_set_opts2(al::out_ptr(mSwresCtx), &layout, mDstSampleFmt,
1194 mCodecCtx->sample_rate, &mCodecCtx->ch_layout, mCodecCtx->sample_fmt,
1195 mCodecCtx->sample_rate, 0, nullptr)};
1196 if(err != 0)
1198 std::array<char,AV_ERROR_MAX_STRING_SIZE> errstr{};
1199 std::cerr<< "Failed to allocate SwrContext: "
1200 <<av_make_error_string(errstr.data(), AV_ERROR_MAX_STRING_SIZE, err) <<std::endl;
1201 return 0;
1204 if(int err{swr_init(mSwresCtx.get())})
1206 std::array<char,AV_ERROR_MAX_STRING_SIZE> errstr{};
1207 std::cerr<< "Failed to initialize audio converter: "
1208 <<av_make_error_string(errstr.data(), AV_ERROR_MAX_STRING_SIZE, err) <<std::endl;
1209 return 0;
1212 alGenBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());
1213 alGenSources(1, &mSource);
1215 if(DirectOutMode)
1216 alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, DirectOutMode);
1217 if(EnableWideStereo)
1219 static constexpr std::array angles{static_cast<float>(al::numbers::pi / 3.0),
1220 static_cast<float>(-al::numbers::pi / 3.0)};
1221 alSourcefv(mSource, AL_STEREO_ANGLES, angles.data());
1223 if(has_bfmt_ex)
1225 for(ALuint bufid : mBuffers)
1227 alBufferi(bufid, AL_AMBISONIC_LAYOUT_SOFT, ambi_layout);
1228 alBufferi(bufid, AL_AMBISONIC_SCALING_SOFT, ambi_scale);
1231 #ifdef AL_SOFT_UHJ
1232 if(EnableSuperStereo)
1233 alSourcei(mSource, AL_STEREO_MODE_SOFT, AL_SUPER_STEREO_SOFT);
1234 #endif
1236 if(alGetError() != AL_NO_ERROR)
1237 return 0;
1239 bool callback_ok{false};
1240 if(alBufferCallbackSOFT)
1242 alBufferCallbackSOFT(mBuffers[0], mFormat, mCodecCtx->sample_rate, bufferCallbackC, this);
1243 alSourcei(mSource, AL_BUFFER, static_cast<ALint>(mBuffers[0]));
1244 if(alGetError() != AL_NO_ERROR)
1246 fprintf(stderr, "Failed to set buffer callback\n");
1247 alSourcei(mSource, AL_BUFFER, 0);
1249 else
1251 mBufferData.resize(static_cast<size_t>(duration_cast<seconds>(mCodecCtx->sample_rate *
1252 AudioBufferTotalTime).count()) * mFrameSize);
1253 std::fill(mBufferData.begin(), mBufferData.end(), uint8_t{});
1255 mReadPos.store(0, std::memory_order_relaxed);
1256 mWritePos.store(mBufferData.size()/mFrameSize/2*mFrameSize, std::memory_order_relaxed);
1258 ALCint refresh{};
1259 alcGetIntegerv(alcGetContextsDevice(alcGetCurrentContext()), ALC_REFRESH, 1, &refresh);
1260 sleep_time = milliseconds{seconds{1}} / refresh;
1261 callback_ok = true;
1264 if(!callback_ok)
1265 buffer_len = static_cast<int>(duration_cast<seconds>(mCodecCtx->sample_rate *
1266 AudioBufferTime).count() * mFrameSize);
1267 if(buffer_len > 0)
1268 samples.resize(static_cast<ALuint>(buffer_len));
1270 /* Prefill the codec buffer. */
1271 auto packet_sender = [this]()
1273 while(true)
1275 const int ret{mQueue.sendPacket(mCodecCtx.get())};
1276 if(ret == AVErrorEOF) break;
1279 auto sender [[maybe_unused]] = std::async(std::launch::async, packet_sender);
1281 srclock.lock();
1282 if(alcGetInteger64vSOFT)
1284 int64_t devtime{};
1285 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT,
1286 1, &devtime);
1287 mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
1290 mSamplesLen = decodeFrame();
1291 if(mSamplesLen > 0)
1293 mSamplesPos = std::min(mSamplesLen, getSync());
1295 auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
1296 mDeviceStartTime -= skip;
1297 mCurrentPts += skip;
1300 while(true)
1302 if(mMovie.mQuit.load(std::memory_order_relaxed))
1304 /* If mQuit is set, drain frames until we can't get more audio,
1305 * indicating we've reached the flush packet and the packet sender
1306 * will also quit.
1308 do {
1309 mSamplesLen = decodeFrame();
1310 mSamplesPos = mSamplesLen;
1311 } while(mSamplesLen > 0);
1312 break;
1315 ALenum state;
1316 if(!mBufferData.empty())
1318 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
1320 /* If mQuit is not set, don't quit even if there's no more audio,
1321 * so what's buffered has a chance to play to the real end.
1323 readAudio(getSync());
1325 else
1327 ALint processed, queued;
1329 /* First remove any processed buffers. */
1330 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
1331 while(processed > 0)
1333 ALuint bid;
1334 alSourceUnqueueBuffers(mSource, 1, &bid);
1335 --processed;
1338 /* Refill the buffer queue. */
1339 int sync_skip{getSync()};
1340 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
1341 while(static_cast<ALuint>(queued) < mBuffers.size())
1343 /* Read the next chunk of data, filling the buffer, and queue
1344 * it on the source.
1346 if(!readAudio(samples, static_cast<ALuint>(buffer_len), sync_skip))
1347 break;
1349 const ALuint bufid{mBuffers[mBufferIdx]};
1350 mBufferIdx = static_cast<ALuint>((mBufferIdx+1) % mBuffers.size());
1352 alBufferData(bufid, mFormat, samples.data(), buffer_len, mCodecCtx->sample_rate);
1353 alSourceQueueBuffers(mSource, 1, &bufid);
1354 ++queued;
1357 /* Check that the source is playing. */
1358 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
1359 if(state == AL_STOPPED)
1361 /* AL_STOPPED means there was an underrun. Clear the buffer
1362 * queue since this likely means we're late, and rewind the
1363 * source to get it back into an AL_INITIAL state.
1365 alSourceRewind(mSource);
1366 alSourcei(mSource, AL_BUFFER, 0);
1367 if(alcGetInteger64vSOFT)
1369 /* Also update the device start time with the current
1370 * device clock, so the decoder knows we're running behind.
1372 int64_t devtime{};
1373 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
1374 ALC_DEVICE_CLOCK_SOFT, 1, &devtime);
1375 mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
1377 continue;
1381 /* (re)start the source if needed, and wait for a buffer to finish */
1382 if(state != AL_PLAYING && state != AL_PAUSED)
1384 if(!startPlayback())
1385 break;
1387 if(ALenum err{alGetError()})
1388 std::cerr<< "Got AL error: 0x"<<std::hex<<err<<std::dec
1389 << " ("<<alGetString(err)<<")" <<std::endl;
1391 mSrcCond.wait_for(srclock, sleep_time);
1394 alSourceRewind(mSource);
1395 alSourcei(mSource, AL_BUFFER, 0);
1396 srclock.unlock();
1398 return 0;
1402 nanoseconds VideoState::getClock()
1404 /* NOTE: This returns incorrect times while not playing. */
1405 std::lock_guard<std::mutex> displock{mDispPtsMutex};
1406 if(mDisplayPtsTime == microseconds::min())
1407 return nanoseconds::zero();
1408 auto delta = get_avtime() - mDisplayPtsTime;
1409 return mDisplayPts + delta;
1412 /* Called by VideoState::updateVideo to display the next video frame. */
1413 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer, AVFrame *frame) const
1415 if(!mImage)
1416 return;
1418 double aspect_ratio;
1419 int win_w, win_h;
1420 int w, h, x, y;
1422 int frame_width{frame->width - static_cast<int>(frame->crop_left + frame->crop_right)};
1423 int frame_height{frame->height - static_cast<int>(frame->crop_top + frame->crop_bottom)};
1424 if(frame->sample_aspect_ratio.num == 0)
1425 aspect_ratio = 0.0;
1426 else
1428 aspect_ratio = av_q2d(frame->sample_aspect_ratio) * frame_width /
1429 frame_height;
1431 if(aspect_ratio <= 0.0)
1432 aspect_ratio = static_cast<double>(frame_width) / frame_height;
1434 SDL_GetWindowSize(screen, &win_w, &win_h);
1435 h = win_h;
1436 w = (static_cast<int>(std::rint(h * aspect_ratio)) + 3) & ~3;
1437 if(w > win_w)
1439 w = win_w;
1440 h = (static_cast<int>(std::rint(w / aspect_ratio)) + 3) & ~3;
1442 x = (win_w - w) / 2;
1443 y = (win_h - h) / 2;
1445 SDL_Rect src_rect{ static_cast<int>(frame->crop_left), static_cast<int>(frame->crop_top),
1446 frame_width, frame_height };
1447 SDL_Rect dst_rect{ x, y, w, h };
1448 SDL_RenderCopy(renderer, mImage, &src_rect, &dst_rect);
1449 SDL_RenderPresent(renderer);
/* Called regularly on the main thread where the SDL_Renderer was created. It
 * handles updating the textures of decoded frames and displaying the latest
 * frame.
 */
void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw)
{
    /* Starting at the current read position, advance over queued pictures
     * whose presentation time has already passed, keeping the newest one.
     */
    size_t read_idx{mPictQRead.load(std::memory_order_relaxed)};
    Picture *vp{&mPictQ[read_idx]};

    auto clocktime = mMovie.getMasterClock();
    bool updated{false};
    while(true)
    {
        size_t next_idx{(read_idx+1)%mPictQ.size()};
        if(next_idx == mPictQWrite.load(std::memory_order_acquire))
            break; /* Queue is empty; nothing newer to take. */
        Picture *nextvp{&mPictQ[next_idx]};
        if(clocktime < nextvp->mPts && !mMovie.mQuit.load(std::memory_order_relaxed))
        {
            /* For the first update, ensure the first frame gets shown. */
            if(!mFirstUpdate || updated)
                break;
        }

        vp = nextvp;
        updated = true;
        read_idx = next_idx;
    }
    if(mMovie.mQuit.load(std::memory_order_relaxed))
    {
        /* Shutting down: flag the final update if decoding already hit EOS,
         * publish the read position, and wake the decoder thread so it can
         * finish.
         */
        if(mEOS)
            mFinalUpdate = true;
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();
        return;
    }

    AVFrame *frame{vp->mFrame.get()};
    if(updated)
    {
        /* A new picture was selected; publish the new read position and wake
         * the decoder thread that may be waiting for a free queue slot.
         */
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();

        /* allocate or resize the buffer! */
        bool fmt_updated{false};
        if(!mImage || mWidth != frame->width || mHeight != frame->height)
        {
            fmt_updated = true;
            if(mImage)
                SDL_DestroyTexture(mImage);
            mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                frame->width, frame->height);
            if(!mImage)
                std::cerr<< "Failed to create YV12 texture!" <<std::endl;
            mWidth = frame->width;
            mHeight = frame->height;
        }

        int frame_width{frame->width - static_cast<int>(frame->crop_left + frame->crop_right)};
        int frame_height{frame->height - static_cast<int>(frame->crop_top + frame->crop_bottom)};
        if(mFirstUpdate && frame_width > 0 && frame_height > 0)
        {
            /* For the first update, set the window size to the video size. */
            mFirstUpdate = false;

            if(frame->sample_aspect_ratio.den != 0)
            {
                /* Scale the window by the sample aspect ratio so non-square
                 * source pixels display correctly.
                 */
                double aspect_ratio = av_q2d(frame->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    frame_width = static_cast<int>(std::lround(frame_width * aspect_ratio));
                else if(aspect_ratio > 0.0)
                    frame_height = static_cast<int>(std::lround(frame_height / aspect_ratio));
            }
            SDL_SetWindowSize(screen, frame_width, frame_height);
        }

        if(mImage)
        {
            void *pixels{nullptr};
            int pitch{0};

            /* YUV420P frames can be uploaded directly; any other pixel
             * format is converted through swscale into the locked texture.
             */
            if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
                SDL_UpdateYUVTexture(mImage, nullptr,
                    frame->data[0], frame->linesize[0],
                    frame->data[1], frame->linesize[1],
                    frame->data[2], frame->linesize[2]
                );
            else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0)
                std::cerr<< "Failed to lock texture" <<std::endl;
            else
            {
                // Convert the image into YUV format that SDL uses
                int w{frame->width};
                int h{frame->height};
                if(!mSwscaleCtx || fmt_updated)
                {
                    /* (Re)create the scaler when the source format or size
                     * changed.
                     */
                    mSwscaleCtx.reset(sws_getContext(
                        w, h, mCodecCtx->pix_fmt,
                        w, h, AV_PIX_FMT_YUV420P, 0,
                        nullptr, nullptr, nullptr
                    ));
                }

                /* point pict at the queue */
                const auto framesize = static_cast<size_t>(w)*static_cast<size_t>(h);
                const auto pixelspan = al::span{static_cast<uint8_t*>(pixels), framesize*3/2};
                const std::array pict_data{
                    al::to_address(pixelspan.begin()),
                    al::to_address(pixelspan.begin() + ptrdiff_t{w}*h),
                    al::to_address(pixelspan.begin() + ptrdiff_t{w}*h + ptrdiff_t{w}*h/4)
                };
                const std::array pict_linesize{pitch, pitch/2, pitch/2};

                sws_scale(mSwscaleCtx.get(), std::data(frame->data), std::data(frame->linesize),
                    0, h, pict_data.data(), pict_linesize.data());
                SDL_UnlockTexture(mImage);
            }

            redraw = true;
        }
    }

    if(redraw)
    {
        /* Show the picture! */
        display(screen, renderer, frame);
    }

    if(updated)
    {
        /* Record which PTS was displayed and when, for getClock(). */
        auto disp_time = get_avtime();

        std::lock_guard<std::mutex> displock{mDispPtsMutex};
        mDisplayPts = vp->mPts;
        mDisplayPtsTime = disp_time;
    }
    if(mEOS.load(std::memory_order_acquire))
    {
        /* Decoding ended; once the picture queue has fully drained, signal
         * the final update so waiting threads can exit.
         */
        if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire))
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>{mPictQMutex}.unlock();
            mPictQCond.notify_one();
        }
    }
}
/* Video decoding thread: pulls packets from the queue, decodes frames,
 * timestamps them, and hands them to the rendering thread via mPictQ.
 * Returns 0 when the stream is exhausted.
 */
int VideoState::handler()
{
    /* Pre-allocate a reusable AVFrame for every picture-queue slot. */
    std::for_each(mPictQ.begin(), mPictQ.end(),
        [](Picture &pict) -> void
        { pict.mFrame = AVFramePtr{av_frame_alloc()}; });

    /* Prefill the codec buffer. */
    auto packet_sender = [this]()
    {
        while(true)
        {
            const int ret{mQueue.sendPacket(mCodecCtx.get())};
            if(ret == AVErrorEOF) break;
        }
    };
    /* Feed packets from a separate task so decoding here never starves. */
    auto sender [[maybe_unused]] = std::async(std::launch::async, packet_sender);

    {
        std::lock_guard<std::mutex> displock{mDispPtsMutex};
        mDisplayPtsTime = get_avtime();
    }

    auto current_pts = nanoseconds::zero();
    while(true)
    {
        size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)};
        Picture *vp{&mPictQ[write_idx]};

        /* Retrieve video frame. */
        AVFrame *decoded_frame{vp->mFrame.get()};
        while(int ret{mQueue.receiveFrame(mCodecCtx.get(), decoded_frame)})
        {
            if(ret == AVErrorEOF) goto finish;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
        }

        /* Get the PTS for this frame. */
        if(decoded_frame->best_effort_timestamp != AVNoPtsValue)
            current_pts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
                static_cast<double>(decoded_frame->best_effort_timestamp)});
        vp->mPts = current_pts;

        /* Update the video clock to the next expected PTS. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
        current_pts += duration_cast<nanoseconds>(seconds_d64{frame_delay});

        /* Put the frame in the queue to be loaded into a texture and displayed
         * by the rendering thread.
         */
        write_idx = (write_idx+1)%mPictQ.size();
        mPictQWrite.store(write_idx, std::memory_order_release);

        if(write_idx == mPictQRead.load(std::memory_order_acquire))
        {
            /* Wait until we have space for a new pic */
            std::unique_lock<std::mutex> lock{mPictQMutex};
            while(write_idx == mPictQRead.load(std::memory_order_acquire))
                mPictQCond.wait(lock);
        }
    }
finish:
    mEOS = true;

    /* Hold until the renderer consumes the last picture and acknowledges. */
    std::unique_lock<std::mutex> lock{mPictQMutex};
    while(!mFinalUpdate) mPictQCond.wait(lock);

    return 0;
}
1672 int MovieState::decode_interrupt_cb(void *ctx)
1674 return static_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
1677 bool MovieState::prepare()
1679 AVIOContext *avioctx{nullptr};
1680 AVIOInterruptCB intcb{decode_interrupt_cb, this};
1681 if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
1683 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1684 return false;
1686 mIOContext.reset(avioctx);
1688 /* Open movie file. If avformat_open_input fails it will automatically free
1689 * this context, so don't set it onto a smart pointer yet.
1691 AVFormatContext *fmtctx{avformat_alloc_context()};
1692 fmtctx->pb = mIOContext.get();
1693 fmtctx->interrupt_callback = intcb;
1694 if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
1696 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1697 return false;
1699 mFormatCtx.reset(fmtctx);
1701 /* Retrieve stream information */
1702 if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
1704 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1705 return false;
1708 /* Dump information about file onto standard error */
1709 av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
1711 mParseThread = std::thread{std::mem_fn(&MovieState::parse_handler), this};
1713 std::unique_lock<std::mutex> slock{mStartupMutex};
1714 while(!mStartupDone) mStartupCond.wait(slock);
1715 return true;
1718 void MovieState::setTitle(SDL_Window *window) const
1720 auto pos1 = mFilename.rfind('/');
1721 auto pos2 = mFilename.rfind('\\');
1722 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1723 (pos2 == std::string::npos) ? pos1 :
1724 std::max(pos1, pos2)) + 1;
1725 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1728 nanoseconds MovieState::getClock() const
1730 if(mClockBase == microseconds::min())
1731 return nanoseconds::zero();
1732 return get_avtime() - mClockBase;
1735 nanoseconds MovieState::getMasterClock()
1737 if(mAVSyncType == SyncMaster::Video && mVideo.mStream)
1738 return mVideo.getClock();
1739 if(mAVSyncType == SyncMaster::Audio && mAudio.mStream)
1740 return mAudio.getClock();
1741 return getClock();
1744 nanoseconds MovieState::getDuration() const
1745 { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
1747 bool MovieState::streamComponentOpen(AVStream *stream)
1749 /* Get a pointer to the codec context for the stream, and open the
1750 * associated codec.
1752 AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)};
1753 if(!avctx) return false;
1755 if(avcodec_parameters_to_context(avctx.get(), stream->codecpar))
1756 return false;
1758 const AVCodec *codec{avcodec_find_decoder(avctx->codec_id)};
1759 if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
1761 std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1762 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1763 return false;
1766 /* Initialize and start the media type handler */
1767 switch(avctx->codec_type)
1769 case AVMEDIA_TYPE_AUDIO:
1770 mAudio.mStream = stream;
1771 mAudio.mCodecCtx = std::move(avctx);
1772 return true;
1774 case AVMEDIA_TYPE_VIDEO:
1775 mVideo.mStream = stream;
1776 mVideo.mCodecCtx = std::move(avctx);
1777 return true;
1779 default:
1780 break;
1783 return false;
/* Demuxer thread: picks the first usable audio/video streams, spawns their
 * handler threads, then reads packets and routes them to the right queue
 * until EOF or quit. Joins the handlers and posts FF_MOVIE_DONE_EVENT when
 * everything has drained.
 */
int MovieState::parse_handler()
{
    auto &audio_queue = mAudio.mQueue;
    auto &video_queue = mVideo.mQueue;

    int video_index{-1};
    int audio_index{-1};

    /* Find the first video and audio streams */
    const auto ctxstreams = al::span{mFormatCtx->streams, mFormatCtx->nb_streams};
    for(size_t i{0};i < ctxstreams.size();++i)
    {
        auto codecpar = ctxstreams[i]->codecpar;
        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0
            && streamComponentOpen(ctxstreams[i]))
                video_index = static_cast<int>(i);
        else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0
            && streamComponentOpen(ctxstreams[i]))
            audio_index = static_cast<int>(i);
    }

    /* Unblock prepare(), which waits for stream selection to finish. */
    {
        std::unique_lock<std::mutex> slock{mStartupMutex};
        mStartupDone = true;
    }
    mStartupCond.notify_all();

    if(video_index < 0 && audio_index < 0)
    {
        std::cerr<< mFilename<<": could not open codecs" <<std::endl;
        mQuit = true;
    }

    /* Set the base time 750ms ahead of the current av time. */
    mClockBase = get_avtime() + milliseconds{750};

    if(audio_index >= 0)
        mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio};
    if(video_index >= 0)
        mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo};

    /* Main packet reading/dispatching loop */
    AVPacketPtr packet{av_packet_alloc()};
    while(!mQuit.load(std::memory_order_relaxed))
    {
        if(av_read_frame(mFormatCtx.get(), packet.get()) < 0)
            break;

        /* Copy the packet into the queue it's meant for. Retry while the
         * queue is full, unless we're quitting.
         */
        if(packet->stream_index == video_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(packet.get()))
                std::this_thread::sleep_for(milliseconds{100});
        }
        else if(packet->stream_index == audio_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(packet.get()))
                std::this_thread::sleep_for(milliseconds{100});
        }

        av_packet_unref(packet.get());
    }
    /* Finish the queues so the receivers know nothing more is coming. */
    video_queue.setFinished();
    audio_queue.setFinished();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    /* Wait for the renderer to consume the last video frame. */
    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock{mVideo.mPictQMutex};
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    /* Tell the main thread this movie is finished. */
    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}
/* Requests shutdown: sets the quit flag, then flushes both packet queues so
 * any handler blocked on a full/empty queue wakes up and sees the flag.
 */
void MovieState::stop()
{
    mQuit = true;
    mAudio.mQueue.flush();
    mVideo.mQueue.flush();
}
// Helper type and stream inserter to print a duration with human-readable
// [-][Hh]MMmSSs formatting (hours shown only when non-zero).
struct PrettyTime {
    std::chrono::seconds mTime;
};

std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using std::chrono::duration_cast;
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;
    using secs = std::chrono::seconds;

    auto t = rhs.mTime;
    if(t < secs::zero())
    {
        // Emit the sign ourselves so each component formats as positive.
        os << '-';
        t = -t;
    }

    // Only handle up to hour formatting
    if(t >= hours{1})
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
            << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    os << std::setw(2) << (t.count() % 60) << 's' << std::setw(0) << std::setfill(' ');
    return os;
}
1907 int main(al::span<std::string_view> args)
1909 SDL_SetMainReady();
1911 std::unique_ptr<MovieState> movState;
1913 if(args.size() < 2)
1915 std::cerr<< "Usage: "<<args[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
1916 return 1;
1918 /* Register all formats and codecs */
1919 #if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100))
1920 av_register_all();
1921 #endif
1922 /* Initialize networking protocols */
1923 avformat_network_init();
1925 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS))
1927 std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1928 return 1;
1931 /* Make a window to put our video */
1932 SDL_Window *screen{SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE)};
1933 if(!screen)
1935 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1936 return 1;
1938 /* Make a renderer to handle the texture image surface and rendering. */
1939 Uint32 render_flags{SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC};
1940 SDL_Renderer *renderer{SDL_CreateRenderer(screen, -1, render_flags)};
1941 if(renderer)
1943 SDL_RendererInfo rinf{};
1944 bool ok{false};
1946 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1947 * software renderer. */
1948 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1950 for(Uint32 i{0u};!ok && i < rinf.num_texture_formats;i++)
1951 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1953 if(!ok)
1955 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1956 SDL_DestroyRenderer(renderer);
1957 renderer = nullptr;
1960 if(!renderer)
1962 render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
1963 renderer = SDL_CreateRenderer(screen, -1, render_flags);
1965 if(!renderer)
1967 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1968 return 1;
1970 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1971 SDL_RenderFillRect(renderer, nullptr);
1972 SDL_RenderPresent(renderer);
1974 /* Open an audio device */
1975 args = args.subspan(1);
1976 if(InitAL(args) != 0)
1977 return 1;
1980 ALCdevice *device{alcGetContextsDevice(alcGetCurrentContext())};
1981 if(alcIsExtensionPresent(device,"ALC_SOFT_device_clock"))
1983 std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
1984 alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
1985 alcGetProcAddress(device, "alcGetInteger64vSOFT"));
1989 if(alIsExtensionPresent("AL_SOFT_source_latency"))
1991 std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
1992 alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
1993 alGetProcAddress("alGetSourcei64vSOFT"));
1995 if(alIsExtensionPresent("AL_SOFT_events"))
1997 std::cout<< "Found AL_SOFT_events" <<std::endl;
1998 alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
1999 alGetProcAddress("alEventControlSOFT"));
2000 alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
2001 alGetProcAddress("alEventCallbackSOFT"));
2003 if(alIsExtensionPresent("AL_SOFT_callback_buffer"))
2005 std::cout<< "Found AL_SOFT_callback_buffer" <<std::endl;
2006 alBufferCallbackSOFT = reinterpret_cast<LPALBUFFERCALLBACKSOFT>(
2007 alGetProcAddress("alBufferCallbackSOFT"));
2010 size_t fileidx{0};
2011 for(;fileidx < args.size();++fileidx)
2013 if(args[fileidx] == "-direct")
2015 if(alIsExtensionPresent("AL_SOFT_direct_channels_remix"))
2017 std::cout<< "Found AL_SOFT_direct_channels_remix" <<std::endl;
2018 DirectOutMode = AL_REMIX_UNMATCHED_SOFT;
2020 else if(alIsExtensionPresent("AL_SOFT_direct_channels"))
2022 std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
2023 DirectOutMode = AL_DROP_UNMATCHED_SOFT;
2025 else
2026 std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
2028 else if(args[fileidx] == "-wide")
2030 if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
2031 std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
2032 else
2034 std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
2035 EnableWideStereo = true;
2038 else if(args[fileidx] == "-uhj")
2040 if(!alIsExtensionPresent("AL_SOFT_UHJ"))
2041 std::cerr<< "AL_SOFT_UHJ not supported for UHJ decoding" <<std::endl;
2042 else
2044 std::cout<< "Found AL_SOFT_UHJ" <<std::endl;
2045 EnableUhj = true;
2048 else if(args[fileidx] == "-superstereo")
2050 if(!alIsExtensionPresent("AL_SOFT_UHJ"))
2051 std::cerr<< "AL_SOFT_UHJ not supported for Super Stereo decoding" <<std::endl;
2052 else
2054 std::cout<< "Found AL_SOFT_UHJ (Super Stereo)" <<std::endl;
2055 EnableSuperStereo = true;
2058 else if(args[fileidx] == "-novideo")
2059 DisableVideo = true;
2060 else
2061 break;
2064 while(fileidx < args.size() && !movState)
2066 movState = std::make_unique<MovieState>(args[fileidx++]);
2067 if(!movState->prepare()) movState = nullptr;
2069 if(!movState)
2071 std::cerr<< "Could not start a video" <<std::endl;
2072 return 1;
2074 movState->setTitle(screen);
2076 /* Default to going to the next movie at the end of one. */
2077 enum class EomAction {
2078 Next, Quit
2079 } eom_action{EomAction::Next};
2080 seconds last_time{seconds::min()};
2081 while(true)
2083 /* SDL_WaitEventTimeout is broken, just force a 10ms sleep. */
2084 std::this_thread::sleep_for(milliseconds{10});
2086 auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
2087 if(cur_time != last_time)
2089 auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
2090 std::cout<< " \r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
2091 last_time = cur_time;
2094 bool force_redraw{false};
2095 SDL_Event event{};
2096 while(SDL_PollEvent(&event) != 0)
2098 switch(event.type)
2100 case SDL_KEYDOWN:
2101 switch(event.key.keysym.sym)
2103 case SDLK_ESCAPE:
2104 movState->stop();
2105 eom_action = EomAction::Quit;
2106 break;
2108 case SDLK_n:
2109 movState->stop();
2110 eom_action = EomAction::Next;
2111 break;
2113 default:
2114 break;
2116 break;
2118 case SDL_WINDOWEVENT:
2119 switch(event.window.event)
2121 case SDL_WINDOWEVENT_RESIZED:
2122 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
2123 SDL_RenderFillRect(renderer, nullptr);
2124 force_redraw = true;
2125 break;
2127 case SDL_WINDOWEVENT_EXPOSED:
2128 force_redraw = true;
2129 break;
2131 default:
2132 break;
2134 break;
2136 case SDL_QUIT:
2137 movState->stop();
2138 eom_action = EomAction::Quit;
2139 break;
2141 case FF_MOVIE_DONE_EVENT:
2142 std::cout<<'\n';
2143 last_time = seconds::min();
2144 if(eom_action != EomAction::Quit)
2146 movState = nullptr;
2147 while(fileidx < args.size() && !movState)
2149 movState = std::make_unique<MovieState>(args[fileidx++]);
2150 if(!movState->prepare()) movState = nullptr;
2152 if(movState)
2154 movState->setTitle(screen);
2155 break;
2159 /* Nothing more to play. Shut everything down and quit. */
2160 movState = nullptr;
2162 CloseAL();
2164 SDL_DestroyRenderer(renderer);
2165 renderer = nullptr;
2166 SDL_DestroyWindow(screen);
2167 screen = nullptr;
2169 SDL_Quit();
2170 exit(0);
2172 default:
2173 break;
2177 movState->mVideo.updateVideo(screen, renderer, force_redraw);
2180 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
2181 return 1;
2184 } // namespace
2186 int main(int argc, char *argv[])
2188 assert(argc >= 0);
2189 auto args = std::vector<std::string_view>(static_cast<unsigned int>(argc));
2190 std::copy_n(argv, args.size(), args.begin());
2191 return main(al::span{args});