2 * An example showing how to play a stream sync'd to video, using ffmpeg.
7 #include <condition_variable>
32 _Pragma("GCC diagnostic push")
33 _Pragma("GCC diagnostic ignored \"-Wconversion\"")
34 _Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
36 #include "libavcodec/avcodec.h"
37 #include "libavformat/avformat.h"
38 #include "libavformat/avio.h"
39 #include "libavformat/version.h"
40 #include "libavutil/avutil.h"
41 #include "libavutil/error.h"
42 #include "libavutil/frame.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/pixfmt.h"
45 #include "libavutil/rational.h"
46 #include "libavutil/samplefmt.h"
47 #include "libavutil/time.h"
48 #include "libavutil/version.h"
49 #include "libavutil/channel_layout.h"
50 #include "libswscale/swscale.h"
51 #include "libswresample/swresample.h"
53 constexpr auto AVNoPtsValue
= AV_NOPTS_VALUE
;
54 constexpr auto AVErrorEOF
= AVERROR_EOF
;
58 _Pragma("GCC diagnostic pop")
68 #include "common/alhelpers.h"
71 /* Undefine this to disable use of experimental extensions. Don't use for
72 * production code! Interfaces and behavior may change prior to being
75 #define ALLOW_EXPERIMENTAL_EXTS
77 #ifdef ALLOW_EXPERIMENTAL_EXTS
78 #ifndef AL_SOFT_events
79 #define AL_SOFT_events 1
80 #define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x1220
81 #define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x1221
82 #define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x1222
83 #define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x1223
84 #define AL_EVENT_TYPE_ERROR_SOFT 0x1224
85 #define AL_EVENT_TYPE_PERFORMANCE_SOFT 0x1225
86 #define AL_EVENT_TYPE_DEPRECATED_SOFT 0x1226
87 #define AL_EVENT_TYPE_DISCONNECTED_SOFT 0x1227
88 typedef void (AL_APIENTRY
*ALEVENTPROCSOFT
)(ALenum eventType
, ALuint object
, ALuint param
,
89 ALsizei length
, const ALchar
*message
,
91 typedef void (AL_APIENTRY
*LPALEVENTCONTROLSOFT
)(ALsizei count
, const ALenum
*types
, ALboolean enable
);
92 typedef void (AL_APIENTRY
*LPALEVENTCALLBACKSOFT
)(ALEVENTPROCSOFT callback
, void *userParam
);
93 typedef void* (AL_APIENTRY
*LPALGETPOINTERSOFT
)(ALenum pname
);
94 typedef void (AL_APIENTRY
*LPALGETPOINTERVSOFT
)(ALenum pname
, void **values
);
97 #ifndef AL_SOFT_callback_buffer
98 #define AL_SOFT_callback_buffer
99 typedef unsigned int ALbitfieldSOFT
;
100 #define AL_BUFFER_CALLBACK_FUNCTION_SOFT 0x19A0
101 #define AL_BUFFER_CALLBACK_USER_PARAM_SOFT 0x19A1
102 typedef ALsizei (AL_APIENTRY
*LPALBUFFERCALLBACKTYPESOFT
)(ALvoid
*userptr
, ALvoid
*sampledata
, ALsizei numsamples
);
103 typedef void (AL_APIENTRY
*LPALBUFFERCALLBACKSOFT
)(ALuint buffer
, ALenum format
, ALsizei freq
, LPALBUFFERCALLBACKTYPESOFT callback
, ALvoid
*userptr
, ALbitfieldSOFT flags
);
104 typedef void (AL_APIENTRY
*LPALGETBUFFERPTRSOFT
)(ALuint buffer
, ALenum param
, ALvoid
**value
);
105 typedef void (AL_APIENTRY
*LPALGETBUFFER3PTRSOFT
)(ALuint buffer
, ALenum param
, ALvoid
**value1
, ALvoid
**value2
, ALvoid
**value3
);
106 typedef void (AL_APIENTRY
*LPALGETBUFFERPTRVSOFT
)(ALuint buffer
, ALenum param
, ALvoid
**values
);
108 #endif /* ALLOW_EXPERIMENTAL_EXTS */
/* User-defined integer literal for signed 64-bit constants (e.g. 1_i64),
 * used below to build shift expressions without overflow on the literal. */
inline constexpr int64_t operator "" _i64(unsigned long long int value) noexcept
{
    return static_cast<int64_t>(value);
}
116 #define M_PI (3.14159265358979323846)
119 using fixed32
= std::chrono::duration
<int64_t,std::ratio
<1,(1_i64
<<32)>>;
120 using nanoseconds
= std::chrono::nanoseconds
;
121 using microseconds
= std::chrono::microseconds
;
122 using milliseconds
= std::chrono::milliseconds
;
123 using seconds
= std::chrono::seconds
;
124 using seconds_d64
= std::chrono::duration
<double>;
125 using std::chrono::duration_cast
;
127 const std::string AppName
{"alffplay"};
129 ALenum DirectOutMode
{AL_FALSE
};
130 bool EnableWideStereo
{false};
131 bool DisableVideo
{false};
132 LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT
;
133 LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT
;
135 #ifdef AL_SOFT_events
136 LPALEVENTCONTROLSOFT alEventControlSOFT
;
137 LPALEVENTCALLBACKSOFT alEventCallbackSOFT
;
140 #ifdef AL_SOFT_callback_buffer
141 LPALBUFFERCALLBACKSOFT alBufferCallbackSOFT
;
144 const seconds AVNoSyncThreshold
{10};
146 #define VIDEO_PICTURE_QUEUE_SIZE 24
148 const seconds_d64 AudioSyncThreshold
{0.03};
149 const milliseconds AudioSampleCorrectionMax
{50};
150 /* Averaging filter coefficient for audio sync. */
151 #define AUDIO_DIFF_AVG_NB 20
152 const double AudioAvgFilterCoeff
{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB
)};
153 /* Per-buffer size, in time */
154 constexpr milliseconds AudioBufferTime
{20};
155 /* Buffer total size, in time (should be divisible by the buffer time) */
156 constexpr milliseconds AudioBufferTotalTime
{800};
157 constexpr auto AudioBufferCount
= AudioBufferTotalTime
/ AudioBufferTime
;
160 FF_MOVIE_DONE_EVENT
= SDL_USEREVENT
163 enum class SyncMaster
{
/* Returns ffmpeg's wall-clock time (av_gettime) wrapped in a
 * std::chrono::microseconds value, so it can be mixed with the other
 * chrono-based timestamps in this file. */
172 inline microseconds
get_avtime()
173 { return microseconds
{av_gettime()}; }
175 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
176 struct AVIOContextDeleter
{
177 void operator()(AVIOContext
*ptr
) { avio_closep(&ptr
); }
179 using AVIOContextPtr
= std::unique_ptr
<AVIOContext
,AVIOContextDeleter
>;
181 struct AVFormatCtxDeleter
{
182 void operator()(AVFormatContext
*ptr
) { avformat_close_input(&ptr
); }
184 using AVFormatCtxPtr
= std::unique_ptr
<AVFormatContext
,AVFormatCtxDeleter
>;
186 struct AVCodecCtxDeleter
{
187 void operator()(AVCodecContext
*ptr
) { avcodec_free_context(&ptr
); }
189 using AVCodecCtxPtr
= std::unique_ptr
<AVCodecContext
,AVCodecCtxDeleter
>;
191 struct AVFrameDeleter
{
192 void operator()(AVFrame
*ptr
) { av_frame_free(&ptr
); }
194 using AVFramePtr
= std::unique_ptr
<AVFrame
,AVFrameDeleter
>;
196 struct SwrContextDeleter
{
197 void operator()(SwrContext
*ptr
) { swr_free(&ptr
); }
199 using SwrContextPtr
= std::unique_ptr
<SwrContext
,SwrContextDeleter
>;
201 struct SwsContextDeleter
{
202 void operator()(SwsContext
*ptr
) { sws_freeContext(ptr
); }
204 using SwsContextPtr
= std::unique_ptr
<SwsContext
,SwsContextDeleter
>;
207 template<size_t SizeLimit
>
210 std::condition_variable mCondVar
;
211 std::deque
<AVPacket
> mPackets
;
212 size_t mTotalSize
{0};
213 bool mFinished
{false};
215 AVPacket
*getPacket(std::unique_lock
<std::mutex
> &lock
)
217 while(mPackets
.empty() && !mFinished
)
219 return mPackets
.empty() ? nullptr : &mPackets
.front();
224 AVPacket
*pkt
= &mPackets
.front();
225 mTotalSize
-= static_cast<unsigned int>(pkt
->size
);
226 av_packet_unref(pkt
);
227 mPackets
.pop_front();
233 for(AVPacket
&pkt
: mPackets
)
234 av_packet_unref(&pkt
);
239 int sendTo(AVCodecContext
*codecctx
)
241 std::unique_lock
<std::mutex
> lock
{mMutex
};
243 AVPacket
*pkt
{getPacket(lock
)};
244 if(!pkt
) return avcodec_send_packet(codecctx
, nullptr);
246 const int ret
{avcodec_send_packet(codecctx
, pkt
)};
247 if(ret
!= AVERROR(EAGAIN
))
250 std::cerr
<< "Failed to send packet: "<<ret
<<std::endl
;
259 std::lock_guard
<std::mutex
> _
{mMutex
};
262 mCondVar
.notify_one();
265 bool put(const AVPacket
*pkt
)
268 std::unique_lock
<std::mutex
> lock
{mMutex
};
269 if(mTotalSize
>= SizeLimit
)
272 mPackets
.push_back(AVPacket
{});
273 if(av_packet_ref(&mPackets
.back(), pkt
) != 0)
279 mTotalSize
+= static_cast<unsigned int>(mPackets
.back().size
);
281 mCondVar
.notify_one();
292 AVStream
*mStream
{nullptr};
293 AVCodecCtxPtr mCodecCtx
;
295 PacketQueue
<2*1024*1024> mPackets
;
297 /* Used for clock difference average computation */
298 seconds_d64 mClockDiffAvg
{0};
300 /* Time of the next sample to be buffered */
301 nanoseconds mCurrentPts
{0};
303 /* Device clock time that the stream started at. */
304 nanoseconds mDeviceStartTime
{nanoseconds::min()};
306 /* Decompressed sample frame, and swresample context for conversion */
307 AVFramePtr mDecodedFrame
;
308 SwrContextPtr mSwresCtx
;
310 /* Conversion format, for what gets fed to OpenAL */
311 uint64_t mDstChanLayout
{0};
312 AVSampleFormat mDstSampleFmt
{AV_SAMPLE_FMT_NONE
};
314 /* Storage of converted samples */
315 uint8_t *mSamples
{nullptr};
316 int mSamplesLen
{0}; /* In samples */
320 std::unique_ptr
<uint8_t[]> mBufferData
;
321 size_t mBufferDataSize
{0};
322 std::atomic
<size_t> mReadPos
{0};
323 std::atomic
<size_t> mWritePos
{0};
326 ALenum mFormat
{AL_NONE
};
327 ALuint mFrameSize
{0};
329 std::mutex mSrcMutex
;
330 std::condition_variable mSrcCond
;
331 std::atomic_flag mConnected
;
333 std::array
<ALuint
,AudioBufferCount
> mBuffers
{};
334 ALuint mBufferIdx
{0};
336 AudioState(MovieState
&movie
) : mMovie(movie
)
337 { mConnected
.test_and_set(std::memory_order_relaxed
); }
341 alDeleteSources(1, &mSource
);
343 alDeleteBuffers(static_cast<ALsizei
>(mBuffers
.size()), mBuffers
.data());
348 #ifdef AL_SOFT_events
349 static void AL_APIENTRY
EventCallback(ALenum eventType
, ALuint object
, ALuint param
,
350 ALsizei length
, const ALchar
*message
, void *userParam
);
352 #ifdef AL_SOFT_callback_buffer
353 static ALsizei AL_APIENTRY
bufferCallbackC(void *userptr
, void *data
, ALsizei size
)
354 { return static_cast<AudioState
*>(userptr
)->bufferCallback(data
, size
); }
355 ALsizei
bufferCallback(void *data
, ALsizei size
);
358 nanoseconds
getClockNoLock();
359 nanoseconds
getClock()
361 std::lock_guard
<std::mutex
> lock
{mSrcMutex
};
362 return getClockNoLock();
365 bool startPlayback();
369 bool readAudio(uint8_t *samples
, unsigned int length
, int &sample_skip
);
370 void readAudio(int sample_skip
);
378 AVStream
*mStream
{nullptr};
379 AVCodecCtxPtr mCodecCtx
;
381 PacketQueue
<14*1024*1024> mPackets
;
383 /* The pts of the currently displayed frame, and the time (av_gettime) it
384 * was last updated - used to have running video pts
386 nanoseconds mDisplayPts
{0};
387 microseconds mDisplayPtsTime
{microseconds::min()};
388 std::mutex mDispPtsMutex
;
390 /* Swscale context for format conversion */
391 SwsContextPtr mSwscaleCtx
;
395 nanoseconds mPts
{nanoseconds::min()};
397 std::array
<Picture
,VIDEO_PICTURE_QUEUE_SIZE
> mPictQ
;
398 std::atomic
<size_t> mPictQRead
{0u}, mPictQWrite
{1u};
399 std::mutex mPictQMutex
;
400 std::condition_variable mPictQCond
;
402 SDL_Texture
*mImage
{nullptr};
403 int mWidth
{0}, mHeight
{0}; /* Logical image size (actual size may be larger) */
404 bool mFirstUpdate
{true};
406 std::atomic
<bool> mEOS
{false};
407 std::atomic
<bool> mFinalUpdate
{false};
409 VideoState(MovieState
&movie
) : mMovie(movie
) { }
413 SDL_DestroyTexture(mImage
);
417 nanoseconds
getClock();
419 void display(SDL_Window
*screen
, SDL_Renderer
*renderer
);
420 void updateVideo(SDL_Window
*screen
, SDL_Renderer
*renderer
, bool redraw
);
425 AVIOContextPtr mIOContext
;
426 AVFormatCtxPtr mFormatCtx
;
428 SyncMaster mAVSyncType
{SyncMaster::Default
};
430 microseconds mClockBase
{microseconds::min()};
432 std::atomic
<bool> mQuit
{false};
437 std::thread mParseThread
;
438 std::thread mAudioThread
;
439 std::thread mVideoThread
;
441 std::string mFilename
;
443 MovieState(std::string fname
)
444 : mAudio(*this), mVideo(*this), mFilename(std::move(fname
))
449 if(mParseThread
.joinable())
453 static int decode_interrupt_cb(void *ctx
);
455 void setTitle(SDL_Window
*window
);
457 nanoseconds
getClock();
459 nanoseconds
getMasterClock();
461 nanoseconds
getDuration();
463 int streamComponentOpen(unsigned int stream_index
);
/* Computes the audio clock (timestamp of the sample currently being heard)
 * without taking mSrcMutex; the caller must already hold it. Three strategies
 * are used, in order of preference: the device clock extension
 * (alcGetInteger64vSOFT), the callback ring buffer position, and finally the
 * source buffer queue + offset/latency.
 * NOTE(review): this extraction is missing several original lines (e.g. the
 * ALint ioffset/status declarations and some closing braces), so the control
 * flow shown here is incomplete — verify against the full file. */
468 nanoseconds
AudioState::getClockNoLock()
470 // The audio clock is the timestamp of the sample currently being heard.
471 if(alcGetInteger64vSOFT
)
473 // If device start time = min, we aren't playing yet.
474 if(mDeviceStartTime
== nanoseconds::min())
475 return nanoseconds::zero();
477 // Get the current device clock time and latency.
478 auto device
= alcGetContextsDevice(alcGetCurrentContext());
479 ALCint64SOFT devtimes
[2]{0,0};
480 alcGetInteger64vSOFT(device
, ALC_DEVICE_CLOCK_LATENCY_SOFT
, 2, devtimes
);
481 auto latency
= nanoseconds
{devtimes
[1]};
482 auto device_time
= nanoseconds
{devtimes
[0]};
484 // The clock is simply the current device time relative to the recorded
485 // start time. We can also subtract the latency to get more a accurate
486 // position of where the audio device actually is in the output stream.
487 return device_time
- mDeviceStartTime
- latency
;
// Second strategy: a callback ring buffer is in use (mBufferDataSize > 0)
// but the device clock extension is unavailable.
490 if(mBufferDataSize
> 0)
492 if(mDeviceStartTime
== nanoseconds::min())
493 return nanoseconds::zero();
495 /* With a callback buffer and no device clock, mDeviceStartTime is
496 * actually the timestamp of the first sample frame played. The audio
497 * clock, then, is that plus the current source offset.
499 ALint64SOFT offset
[2];
500 if(alGetSourcei64vSOFT
)
501 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_LATENCY_SOFT
, offset
);
// Fallback when the 64-bit offset query is unavailable: use the plain
// sample offset and shift it into the 32.32 fixed-point format.
505 alGetSourcei(mSource
, AL_SAMPLE_OFFSET
, &ioffset
);
506 offset
[0] = ALint64SOFT
{ioffset
} << 32;
509 /* NOTE: The source state must be checked last, in case an underrun
510 * occurs and the source stops between getting the state and retrieving
511 * the offset+latency.
514 alGetSourcei(mSource
, AL_SOURCE_STATE
, &status
);
517 if(status
== AL_PLAYING
|| status
== AL_PAUSED
)
518 pts
= mDeviceStartTime
- nanoseconds
{offset
[1]} +
519 duration_cast
<nanoseconds
>(fixed32
{offset
[0] / mCodecCtx
->sample_rate
});
522 /* If the source is stopped, the pts of the next sample to be heard
523 * is the pts of the next sample to be buffered, minus the amount
524 * already in the buffer ready to play.
526 const size_t woffset
{mWritePos
.load(std::memory_order_acquire
)};
527 const size_t roffset
{mReadPos
.load(std::memory_order_relaxed
)};
528 const size_t readable
{((woffset
>= roffset
) ? woffset
: (mBufferDataSize
+woffset
)) -
531 pts
= mCurrentPts
- nanoseconds
{seconds
{readable
/mFrameSize
}}/mCodecCtx
->sample_rate
;
537 /* The source-based clock is based on 4 components:
538 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
539 * 2 - The length of the source's buffer queue
540 * (AudioBufferTime*AL_BUFFERS_QUEUED)
541 * 3 - The offset OpenAL is currently at in the source (the first value
542 * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
543 * 4 - The latency between OpenAL and the DAC (the second value from
544 * AL_SAMPLE_OFFSET_LATENCY_SOFT)
546 * Subtracting the length of the source queue from the next sample's
547 * timestamp gives the timestamp of the sample at the start of the source
548 * queue. Adding the source offset to that results in the timestamp for the
549 * sample at OpenAL's current position, and subtracting the source latency
550 * from that gives the timestamp of the sample currently at the DAC.
552 nanoseconds pts
{mCurrentPts
};
555 ALint64SOFT offset
[2];
556 if(alGetSourcei64vSOFT
)
557 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_LATENCY_SOFT
, offset
);
// Same 32.32 fixed-point fallback as above when the extension is absent.
561 alGetSourcei(mSource
, AL_SAMPLE_OFFSET
, &ioffset
);
562 offset
[0] = ALint64SOFT
{ioffset
} << 32;
565 ALint queued
, status
;
566 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
567 alGetSourcei(mSource
, AL_SOURCE_STATE
, &status
);
569 /* If the source is AL_STOPPED, then there was an underrun and all
570 * buffers are processed, so ignore the source queue. The audio thread
571 * will put the source into an AL_INITIAL state and clear the queue
572 * when it starts recovery.
574 if(status
!= AL_STOPPED
)
576 pts
-= AudioBufferTime
*queued
;
577 pts
+= duration_cast
<nanoseconds
>(fixed32
{offset
[0] / mCodecCtx
->sample_rate
});
579 /* Don't offset by the latency if the source isn't playing. */
580 if(status
== AL_PLAYING
)
581 pts
-= nanoseconds
{offset
[1]};
// Never report a negative clock; clamp at zero.
584 return std::max(pts
, nanoseconds::zero());
/* Starts (or restarts) source playback and records mDeviceStartTime so the
 * audio clock can be derived later. Returns false when using the legacy
 * buffer queue and nothing is queued yet. Several original lines are missing
 * from this extraction (closing braces, the `queued` declaration) — verify
 * against the full file. */
587 bool AudioState::startPlayback()
589 const size_t woffset
{mWritePos
.load(std::memory_order_acquire
)};
590 const size_t roffset
{mReadPos
.load(std::memory_order_relaxed
)};
591 const size_t readable
{((woffset
>= roffset
) ? woffset
: (mBufferDataSize
+woffset
)) -
594 if(mBufferDataSize
> 0)
// Without the device clock extension, treat "now" as the pts of the first
// readable sample in the ring buffer.
598 if(!alcGetInteger64vSOFT
)
599 mDeviceStartTime
= mCurrentPts
-
600 nanoseconds
{seconds
{readable
/mFrameSize
}}/mCodecCtx
->sample_rate
;
// Legacy path: can't start playing until at least one buffer is queued.
605 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
606 if(queued
== 0) return false;
609 alSourcePlay(mSource
);
610 if(alcGetInteger64vSOFT
)
612 /* Subtract the total buffer queue time from the current pts to get the
613 * pts of the start of the queue.
615 int64_t srctimes
[2]{0,0};
616 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_CLOCK_SOFT
, srctimes
);
617 auto device_time
= nanoseconds
{srctimes
[1]};
618 auto src_offset
= duration_cast
<nanoseconds
>(fixed32
{srctimes
[0]}) /
619 mCodecCtx
->sample_rate
;
621 /* The mixer may have ticked and incremented the device time and sample
622 * offset, so subtract the source offset from the device time to get
623 * the device time the source started at. Also subtract startpts to get
624 * the device time the stream would have started at to reach where it
627 if(mBufferDataSize
> 0)
629 nanoseconds startpts
{mCurrentPts
-
630 nanoseconds
{seconds
{readable
/mFrameSize
}}/mCodecCtx
->sample_rate
};
631 mDeviceStartTime
= device_time
- src_offset
- startpts
;
// Queue-based path: the whole buffer queue's worth of time is pending.
635 nanoseconds startpts
{mCurrentPts
- AudioBufferTotalTime
};
636 mDeviceStartTime
= device_time
- src_offset
- startpts
;
/* Returns how many sample frames to skip (positive) or duplicate (negative,
 * presumably — the early-return lines are missing from this extraction) to
 * bring the audio clock in line with the master clock. Returns via a
 * low-pass-filtered average of the clock difference so momentary jitter
 * doesn't cause corrections. */
642 int AudioState::getSync()
// When audio itself is the sync master there is nothing to correct against.
644 if(mMovie
.mAVSyncType
== SyncMaster::Audio
)
647 auto ref_clock
= mMovie
.getMasterClock();
648 auto diff
= ref_clock
- getClockNoLock();
650 if(!(diff
< AVNoSyncThreshold
&& diff
> -AVNoSyncThreshold
))
652 /* Difference is TOO big; reset accumulated average */
653 mClockDiffAvg
= seconds_d64::zero();
657 /* Accumulate the diffs */
658 mClockDiffAvg
= mClockDiffAvg
*AudioAvgFilterCoeff
+ diff
;
659 auto avg_diff
= mClockDiffAvg
*(1.0 - AudioAvgFilterCoeff
);
// Within the sync threshold: no correction needed (return is on a missing line).
660 if(avg_diff
< AudioSyncThreshold
/2.0 && avg_diff
> -AudioSyncThreshold
)
663 /* Constrain the per-update difference to avoid exceedingly large skips */
664 diff
= std::min
<nanoseconds
>(diff
, AudioSampleCorrectionMax
);
// Convert the time correction to a whole number of sample frames.
665 return static_cast<int>(duration_cast
<seconds
>(diff
*mCodecCtx
->sample_rate
).count());
/* Decodes the next audio frame, converts it into mSamples via swresample,
 * and updates mCurrentPts from the frame's best-effort timestamp. Returns
 * the number of converted sample frames (the return statements themselves
 * are on lines missing from this extraction). */
668 int AudioState::decodeFrame()
670 while(!mMovie
.mQuit
.load(std::memory_order_relaxed
))
// Pump packets into the decoder until a full frame is available.
673 while((ret
=avcodec_receive_frame(mCodecCtx
.get(), mDecodedFrame
.get())) == AVERROR(EAGAIN
))
674 mPackets
.sendTo(mCodecCtx
.get());
677 if(ret
== AVErrorEOF
) break;
678 std::cerr
<< "Failed to receive frame: "<<ret
<<std::endl
;
// Skip empty frames and keep decoding.
682 if(mDecodedFrame
->nb_samples
<= 0)
685 /* If provided, update w/ pts */
686 if(mDecodedFrame
->best_effort_timestamp
!= AVNoPtsValue
)
687 mCurrentPts
= duration_cast
<nanoseconds
>(seconds_d64
{av_q2d(mStream
->time_base
) *
688 static_cast<double>(mDecodedFrame
->best_effort_timestamp
)});
// Grow the conversion buffer if this frame has more samples than any before.
690 if(mDecodedFrame
->nb_samples
> mSamplesMax
)
693 av_samples_alloc(&mSamples
, nullptr, mCodecCtx
->channels
, mDecodedFrame
->nb_samples
,
695 mSamplesMax
= mDecodedFrame
->nb_samples
;
697 /* Return the amount of sample frames converted */
698 int data_size
{swr_convert(mSwresCtx
.get(), &mSamples
, mDecodedFrame
->nb_samples
,
699 const_cast<const uint8_t**>(mDecodedFrame
->data
), mDecodedFrame
->nb_samples
)};
701 av_frame_unref(mDecodedFrame
.get());
708 /* Duplicates the sample at in to out, count times. The frame size is a
709 * multiple of the template type size.
// NOTE(review): the template<typename T> header (original lines 710-711) is
// missing from this extraction; T is the element type used for copying.
712 static void sample_dup(uint8_t *out
, const uint8_t *in
, size_t count
, size_t frame_size
)
714 auto *sample
= reinterpret_cast<const T
*>(in
);
715 auto *dst
= reinterpret_cast<T
*>(out
);
// Fast path: one T per frame, so a plain fill works.
716 if(frame_size
== sizeof(T
))
717 std::fill_n(dst
, count
, *sample
);
720 /* NOTE: frame_size is a multiple of sizeof(T). */
721 size_t type_mult
{frame_size
/ sizeof(T
)};
// General path: emit count*type_mult elements, cycling through the
// type_mult elements of the source frame (generator body is on missing lines).
723 std::generate_n(dst
, count
*type_mult
,
724 [sample
,type_mult
,&i
]() -> T
/* Fills `samples` with `length` bytes of converted audio for the legacy
 * buffer-queue path. Copies decoded data when in sync, duplicates the first
 * pending sample when behind (negative mSamplesPos), decodes more frames as
 * needed, and zero/0x80-fills any shortfall so a full buffer is always
 * produced. `sample_skip` (from getSync) is consumed across decoded frames.
 * The return statement is on a line missing from this extraction. */
735 bool AudioState::readAudio(uint8_t *samples
, unsigned int length
, int &sample_skip
)
737 unsigned int audio_size
{0};
739 /* Read the next chunk of data, refill the buffer, and queue it
// Work in whole sample frames from here on.
741 length
/= mFrameSize
;
742 while(mSamplesLen
> 0 && audio_size
< length
)
744 unsigned int rem
{length
- audio_size
};
// In-sync (or ahead) case: copy straight out of the converted buffer.
747 const auto len
= static_cast<unsigned int>(mSamplesLen
- mSamplesPos
);
748 if(rem
> len
) rem
= len
;
749 std::copy_n(mSamples
+ static_cast<unsigned int>(mSamplesPos
)*mFrameSize
,
750 rem
*mFrameSize
, samples
);
// Behind case (mSamplesPos < 0): repeat the first sample to pad time.
754 rem
= std::min(rem
, static_cast<unsigned int>(-mSamplesPos
));
756 /* Add samples by copying the first sample */
757 if((mFrameSize
&7) == 0)
758 sample_dup
<uint64_t>(samples
, mSamples
, rem
, mFrameSize
);
759 else if((mFrameSize
&3) == 0)
760 sample_dup
<uint32_t>(samples
, mSamples
, rem
, mFrameSize
);
761 else if((mFrameSize
&1) == 0)
762 sample_dup
<uint16_t>(samples
, mSamples
, rem
, mFrameSize
);
764 sample_dup
<uint8_t>(samples
, mSamples
, rem
, mFrameSize
);
768 mCurrentPts
+= nanoseconds
{seconds
{rem
}} / mCodecCtx
->sample_rate
;
769 samples
+= rem
*mFrameSize
;
// Out of converted data: decode more frames, applying any pending skip.
772 while(mSamplesPos
>= mSamplesLen
)
774 int frame_len
= decodeFrame();
775 if(frame_len
<= 0) break;
777 mSamplesLen
= frame_len
;
778 mSamplesPos
= std::min(mSamplesLen
, sample_skip
);
779 sample_skip
-= mSamplesPos
;
781 // Adjust the device start time and current pts by the amount we're
782 // skipping/duplicating, so that the clock remains correct for the
783 // current stream position.
784 auto skip
= nanoseconds
{seconds
{mSamplesPos
}} / mCodecCtx
->sample_rate
;
785 mDeviceStartTime
-= skip
;
// Pad any remaining space with silence (0x80 is silence for unsigned 8-bit).
793 if(audio_size
< length
)
795 const unsigned int rem
{length
- audio_size
};
796 std::fill_n(samples
, rem
*mFrameSize
,
797 (mDstSampleFmt
== AV_SAMPLE_FMT_U8
) ? 0x80 : 0x00);
798 mCurrentPts
+= nanoseconds
{seconds
{rem
}} / mCodecCtx
->sample_rate
;
/* Ring-buffer variant of readAudio, used with the buffer-callback extension.
 * Writes converted samples into mBufferData between mWritePos and mReadPos
 * (leaving one byte free to distinguish full from empty), duplicating the
 * first sample while behind (negative mSamplesPos) and decoding more frames
 * until the ring is full or the stream ends. Only this thread writes
 * mWritePos; the callback thread only reads it (release/acquire pairing). */
804 void AudioState::readAudio(int sample_skip
)
806 size_t woffset
{mWritePos
.load(std::memory_order_acquire
)};
807 while(mSamplesLen
> 0)
809 const size_t roffset
{mReadPos
.load(std::memory_order_relaxed
)};
// Behind case: compute how many duplicated frames fit contiguously before
// the read cursor (or the end of the buffer), then pad with sample_dup.
813 size_t rem
{(((roffset
> woffset
) ? roffset
-1
814 : ((roffset
== 0) ? mBufferDataSize
-1
815 : mBufferDataSize
)) - woffset
) / mFrameSize
};
816 rem
= std::min
<size_t>(rem
, static_cast<ALuint
>(-mSamplesPos
));
819 auto *splout
{&mBufferData
[woffset
]};
820 if((mFrameSize
&7) == 0)
821 sample_dup
<uint64_t>(splout
, mSamples
, rem
, mFrameSize
);
822 else if((mFrameSize
&3) == 0)
823 sample_dup
<uint32_t>(splout
, mSamples
, rem
, mFrameSize
);
824 else if((mFrameSize
&1) == 0)
825 sample_dup
<uint16_t>(splout
, mSamples
, rem
, mFrameSize
);
827 sample_dup
<uint8_t>(splout
, mSamples
, rem
, mFrameSize
);
828 woffset
+= rem
* mFrameSize
;
829 if(woffset
== mBufferDataSize
)
831 mWritePos
.store(woffset
, std::memory_order_release
);
832 mSamplesPos
+= static_cast<int>(rem
);
833 mCurrentPts
+= nanoseconds
{seconds
{rem
}} / mCodecCtx
->sample_rate
;
// In-sync case: copy the rest of the converted frame into the ring,
// splitting the memcpy when it wraps past the end of the buffer.
837 const size_t boffset
{static_cast<ALuint
>(mSamplesPos
) * size_t{mFrameSize
}};
838 const size_t nbytes
{static_cast<ALuint
>(mSamplesLen
)*size_t{mFrameSize
} -
840 if(roffset
> woffset
)
842 const size_t writable
{roffset
-woffset
-1};
843 if(writable
< nbytes
) break;
845 memcpy(&mBufferData
[woffset
], mSamples
+boffset
, nbytes
);
850 const size_t writable
{mBufferDataSize
+roffset
-woffset
-1};
851 if(writable
< nbytes
) break;
853 const size_t todo1
{std::min
<size_t>(nbytes
, mBufferDataSize
-woffset
)};
854 const size_t todo2
{nbytes
- todo1
};
856 memcpy(&mBufferData
[woffset
], mSamples
+boffset
, todo1
);
858 if(woffset
== mBufferDataSize
)
863 memcpy(&mBufferData
[woffset
], mSamples
+boffset
+todo1
, todo2
);
868 mWritePos
.store(woffset
, std::memory_order_release
);
869 mCurrentPts
+= nanoseconds
{seconds
{mSamplesLen
-mSamplesPos
}} / mCodecCtx
->sample_rate
;
// Frame fully consumed: decode the next one, applying any pending skip.
872 mSamplesLen
= decodeFrame();
873 if(mSamplesLen
<= 0) break;
875 mSamplesPos
= std::min(mSamplesLen
, sample_skip
);
876 sample_skip
-= mSamplesPos
;
// Keep the audio clock consistent with the skipped/duplicated span.
878 auto skip
= nanoseconds
{seconds
{mSamplesPos
}} / mCodecCtx
->sample_rate
;
879 mDeviceStartTime
-= skip
;
881 } while(mSamplesPos
>= mSamplesLen
);
886 #ifdef AL_SOFT_events
/* AL_SOFT_events callback. Wakes the audio thread on buffer completion,
 * logs every event to stdout, and on disconnect clears mConnected (under
 * mSrcMutex) so the audio thread can attempt recovery. Runs on an OpenAL
 * internal thread — hence the mutex dance rather than direct state access. */
887 void AL_APIENTRY
AudioState::EventCallback(ALenum eventType
, ALuint object
, ALuint param
,
888 ALsizei length
, const ALchar
*message
, void *userParam
)
890 auto self
= static_cast<AudioState
*>(userParam
);
892 if(eventType
== AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT
)
894 /* Temporarily lock the source mutex to ensure it's not between
895 * checking the processed count and going to sleep.
897 std::unique_lock
<std::mutex
>{self
->mSrcMutex
}.unlock();
898 self
->mSrcCond
.notify_one();
// Log the event type (switch header is on a missing line).
902 std::cout
<< "\n---- AL Event on AudioState "<<self
<<" ----\nEvent: ";
905 case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT
: std::cout
<< "Buffer completed"; break;
906 case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT
: std::cout
<< "Source state changed"; break;
907 case AL_EVENT_TYPE_ERROR_SOFT
: std::cout
<< "API error"; break;
908 case AL_EVENT_TYPE_PERFORMANCE_SOFT
: std::cout
<< "Performance"; break;
909 case AL_EVENT_TYPE_DEPRECATED_SOFT
: std::cout
<< "Deprecated"; break;
910 case AL_EVENT_TYPE_DISCONNECTED_SOFT
: std::cout
<< "Disconnected"; break;
// Unknown event: print the raw enum in hex, then restore stream formatting.
912 std::cout
<< "0x"<<std::hex
<<std::setw(4)<<std::setfill('0')<<eventType
<<std::dec
<<
913 std::setw(0)<<std::setfill(' '); break;
916 "Object ID: "<<object
<<"\n"
917 "Parameter: "<<param
<<"\n"
918 "Message: "<<std::string
{message
, static_cast<ALuint
>(length
)}<<"\n----"<<
921 if(eventType
== AL_EVENT_TYPE_DISCONNECTED_SOFT
)
924 std::lock_guard
<std::mutex
> lock
{self
->mSrcMutex
};
925 self
->mConnected
.clear(std::memory_order_release
);
927 self
->mSrcCond
.notify_one();
932 #ifdef AL_SOFT_callback_buffer
/* AL_SOFT_callback_buffer consumer: the mixer calls this to pull up to
 * `size` bytes from the ring buffer into `data`. Returns the byte count
 * delivered (the `got` accumulator and return are on missing lines). Only
 * this callback writes mReadPos; the decode thread only reads it. */
933 ALsizei
AudioState::bufferCallback(void *data
, ALsizei size
)
937 size_t roffset
{mReadPos
.load(std::memory_order_acquire
)};
940 const size_t woffset
{mWritePos
.load(std::memory_order_relaxed
)};
// Ring empty: nothing more to deliver this call.
941 if(woffset
== roffset
) break;
// Copy the contiguous readable span (up to buffer end if wrapped).
943 size_t todo
{((woffset
< roffset
) ? mBufferDataSize
: woffset
) - roffset
};
944 todo
= std::min
<size_t>(todo
, static_cast<ALuint
>(size
-got
));
946 memcpy(data
, &mBufferData
[roffset
], todo
);
947 data
= static_cast<ALbyte
*>(data
) + todo
;
948 got
+= static_cast<ALsizei
>(todo
);
// Wrap the read cursor and publish it to the decode thread.
951 if(roffset
== mBufferDataSize
)
954 mReadPos
.store(roffset
, std::memory_order_release
);
960 int AudioState::handler()
962 std::unique_lock
<std::mutex
> srclock
{mSrcMutex
, std::defer_lock
};
963 milliseconds sleep_time
{AudioBufferTime
/ 3};
966 #ifdef AL_SOFT_events
967 const std::array
<ALenum
,6> evt_types
{{
968 AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT
, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT
,
969 AL_EVENT_TYPE_ERROR_SOFT
, AL_EVENT_TYPE_PERFORMANCE_SOFT
, AL_EVENT_TYPE_DEPRECATED_SOFT
,
970 AL_EVENT_TYPE_DISCONNECTED_SOFT
}};
971 if(alEventControlSOFT
)
973 alEventControlSOFT(evt_types
.size(), evt_types
.data(), AL_TRUE
);
974 alEventCallbackSOFT(EventCallback
, this);
975 sleep_time
= AudioBufferTotalTime
;
978 #ifdef AL_SOFT_bformat_ex
979 const bool has_bfmt_ex
{alIsExtensionPresent("AL_SOFT_bformat_ex") != AL_FALSE
};
980 ALenum ambi_layout
{AL_FUMA_SOFT
};
981 ALenum ambi_scale
{AL_FUMA_SOFT
};
984 /* Find a suitable format for OpenAL. */
987 if((mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_FLT
|| mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_FLTP
) &&
988 alIsExtensionPresent("AL_EXT_FLOAT32"))
990 mDstSampleFmt
= AV_SAMPLE_FMT_FLT
;
992 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_7POINT1
&&
993 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
994 (fmt
=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE
&& fmt
!= -1)
996 mDstChanLayout
= mCodecCtx
->channel_layout
;
1000 if((mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1
||
1001 mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1_BACK
) &&
1002 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
1003 (fmt
=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE
&& fmt
!= -1)
1005 mDstChanLayout
= mCodecCtx
->channel_layout
;
1009 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_MONO
)
1011 mDstChanLayout
= mCodecCtx
->channel_layout
;
1013 mFormat
= AL_FORMAT_MONO_FLOAT32
;
1015 /* Assume 3D B-Format (ambisonics) if the channel layout is blank and
1016 * there's 4 or more channels. FFmpeg/libavcodec otherwise seems to
1017 * have no way to specify if the source is actually B-Format (let alone
1018 * if it's 2D or 3D).
1020 if(mCodecCtx
->channel_layout
== 0 && mCodecCtx
->channels
>= 4 &&
1021 alIsExtensionPresent("AL_EXT_BFORMAT") &&
1022 (fmt
=alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32")) != AL_NONE
&& fmt
!= -1)
1024 int order
{static_cast<int>(std::sqrt(mCodecCtx
->channels
)) - 1};
1025 if((order
+1)*(order
+1) == mCodecCtx
->channels
||
1026 (order
+1)*(order
+1) + 2 == mCodecCtx
->channels
)
1028 /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
1029 * is 4 channels for 3D buffers.
1037 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
1039 mFormat
= AL_FORMAT_STEREO_FLOAT32
;
1042 if(mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_U8
|| mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_U8P
)
1044 mDstSampleFmt
= AV_SAMPLE_FMT_U8
;
1046 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_7POINT1
&&
1047 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
1048 (fmt
=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE
&& fmt
!= -1)
1050 mDstChanLayout
= mCodecCtx
->channel_layout
;
1054 if((mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1
||
1055 mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1_BACK
) &&
1056 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
1057 (fmt
=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE
&& fmt
!= -1)
1059 mDstChanLayout
= mCodecCtx
->channel_layout
;
1063 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_MONO
)
1065 mDstChanLayout
= mCodecCtx
->channel_layout
;
1067 mFormat
= AL_FORMAT_MONO8
;
1069 if(mCodecCtx
->channel_layout
== 0 && mCodecCtx
->channels
>= 4 &&
1070 alIsExtensionPresent("AL_EXT_BFORMAT") &&
1071 (fmt
=alGetEnumValue("AL_FORMAT_BFORMAT3D8")) != AL_NONE
&& fmt
!= -1)
1073 int order
{static_cast<int>(std::sqrt(mCodecCtx
->channels
)) - 1};
1074 if((order
+1)*(order
+1) == mCodecCtx
->channels
||
1075 (order
+1)*(order
+1) + 2 == mCodecCtx
->channels
)
1083 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
1085 mFormat
= AL_FORMAT_STEREO8
;
1090 mDstSampleFmt
= AV_SAMPLE_FMT_S16
;
1092 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_7POINT1
&&
1093 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
1094 (fmt
=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE
&& fmt
!= -1)
1096 mDstChanLayout
= mCodecCtx
->channel_layout
;
1100 if((mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1
||
1101 mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1_BACK
) &&
1102 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
1103 (fmt
=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE
&& fmt
!= -1)
1105 mDstChanLayout
= mCodecCtx
->channel_layout
;
1109 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_MONO
)
1111 mDstChanLayout
= mCodecCtx
->channel_layout
;
1113 mFormat
= AL_FORMAT_MONO16
;
1115 if(mCodecCtx
->channel_layout
== 0 && mCodecCtx
->channels
>= 4 &&
1116 alIsExtensionPresent("AL_EXT_BFORMAT") &&
1117 (fmt
=alGetEnumValue("AL_FORMAT_BFORMAT3D16")) != AL_NONE
&& fmt
!= -1)
1119 int order
{static_cast<int>(std::sqrt(mCodecCtx
->channels
)) - 1};
1120 if((order
+1)*(order
+1) == mCodecCtx
->channels
||
1121 (order
+1)*(order
+1) + 2 == mCodecCtx
->channels
)
1129 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
1131 mFormat
= AL_FORMAT_STEREO16
;
1134 void *samples
{nullptr};
1135 ALsizei buffer_len
{0};
1142 mDecodedFrame
.reset(av_frame_alloc());
1145 std::cerr
<< "Failed to allocate audio frame" <<std::endl
;
1151 /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
1152 * we have to drop any extra channels.
1154 mSwresCtx
.reset(swr_alloc_set_opts(nullptr,
1155 (1_i64
<<4)-1, mDstSampleFmt
, mCodecCtx
->sample_rate
,
1156 (1_i64
<<mCodecCtx
->channels
)-1, mCodecCtx
->sample_fmt
, mCodecCtx
->sample_rate
,
1159 /* Note that ffmpeg/libavcodec has no method to check the ambisonic
1160 * channel order and normalization, so we can only assume AmbiX as the
1161 * defacto-standard. This is not true for .amb files, which use FuMa.
1163 std::vector
<double> mtx(64*64, 0.0);
1164 #ifdef AL_SOFT_bformat_ex
1165 ambi_layout
= AL_ACN_SOFT
;
1166 ambi_scale
= AL_SN3D_SOFT
;
1169 /* An identity matrix that doesn't remix any channels. */
1170 std::cout
<< "Found AL_SOFT_bformat_ex" <<std::endl
;
1171 mtx
[0 + 0*64] = 1.0;
1172 mtx
[1 + 1*64] = 1.0;
1173 mtx
[2 + 2*64] = 1.0;
1174 mtx
[3 + 3*64] = 1.0;
1179 std::cout
<< "Found AL_EXT_BFORMAT" <<std::endl
;
1180 /* Without AL_SOFT_bformat_ex, OpenAL only supports FuMa channel
1181 * ordering and normalization, so a custom matrix is needed to
1182 * scale and reorder the source from AmbiX.
1184 mtx
[0 + 0*64] = std::sqrt(0.5);
1185 mtx
[3 + 1*64] = 1.0;
1186 mtx
[1 + 2*64] = 1.0;
1187 mtx
[2 + 3*64] = 1.0;
1189 swr_set_matrix(mSwresCtx
.get(), mtx
.data(), 64);
1192 mSwresCtx
.reset(swr_alloc_set_opts(nullptr,
1193 static_cast<int64_t>(mDstChanLayout
), mDstSampleFmt
, mCodecCtx
->sample_rate
,
1194 mCodecCtx
->channel_layout
? static_cast<int64_t>(mCodecCtx
->channel_layout
)
1195 : av_get_default_channel_layout(mCodecCtx
->channels
),
1196 mCodecCtx
->sample_fmt
, mCodecCtx
->sample_rate
,
1198 if(!mSwresCtx
|| swr_init(mSwresCtx
.get()) != 0)
1200 std::cerr
<< "Failed to initialize audio converter" <<std::endl
;
1204 alGenBuffers(static_cast<ALsizei
>(mBuffers
.size()), mBuffers
.data());
1205 alGenSources(1, &mSource
);
1208 alSourcei(mSource
, AL_DIRECT_CHANNELS_SOFT
, DirectOutMode
);
1209 if(EnableWideStereo
)
1211 const float angles
[2]{static_cast<float>(M_PI
/ 3.0), static_cast<float>(-M_PI
/ 3.0)};
1212 alSourcefv(mSource
, AL_STEREO_ANGLES
, angles
);
1214 #ifdef AL_SOFT_bformat_ex
1217 for(ALuint bufid
: mBuffers
)
1219 alBufferi(bufid
, AL_AMBISONIC_LAYOUT_SOFT
, ambi_layout
);
1220 alBufferi(bufid
, AL_AMBISONIC_SCALING_SOFT
, ambi_scale
);
1225 if(alGetError() != AL_NO_ERROR
)
1228 #ifdef AL_SOFT_callback_buffer
1229 if(alBufferCallbackSOFT
)
1231 alBufferCallbackSOFT(mBuffers
[0], mFormat
, mCodecCtx
->sample_rate
, bufferCallbackC
, this,
1233 alSourcei(mSource
, AL_BUFFER
, static_cast<ALint
>(mBuffers
[0]));
1234 if(alGetError() != AL_NO_ERROR
)
1236 fprintf(stderr
, "Failed to set buffer callback\n");
1237 alSourcei(mSource
, AL_BUFFER
, 0);
1238 buffer_len
= static_cast<int>(duration_cast
<seconds
>(mCodecCtx
->sample_rate
*
1239 AudioBufferTime
).count() * mFrameSize
);
1243 mBufferDataSize
= static_cast<size_t>(duration_cast
<seconds
>(mCodecCtx
->sample_rate
*
1244 AudioBufferTotalTime
).count()) * mFrameSize
;
1245 mBufferData
.reset(new uint8_t[mBufferDataSize
]);
1246 mReadPos
.store(0, std::memory_order_relaxed
);
1247 mWritePos
.store(0, std::memory_order_relaxed
);
1250 alcGetIntegerv(alcGetContextsDevice(alcGetCurrentContext()), ALC_REFRESH
, 1, &refresh
);
1251 sleep_time
= milliseconds
{seconds
{1}} / refresh
;
1256 buffer_len
= static_cast<int>(duration_cast
<seconds
>(mCodecCtx
->sample_rate
*
1257 AudioBufferTime
).count() * mFrameSize
);
1259 samples
= av_malloc(static_cast<ALuint
>(buffer_len
));
1261 /* Prefill the codec buffer. */
1263 const int ret
{mPackets
.sendTo(mCodecCtx
.get())};
1264 if(ret
== AVERROR(EAGAIN
) || ret
== AVErrorEOF
)
1269 if(alcGetInteger64vSOFT
)
1272 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT
,
1274 mDeviceStartTime
= nanoseconds
{devtime
} - mCurrentPts
;
1277 mSamplesLen
= decodeFrame();
1280 mSamplesPos
= std::min(mSamplesLen
, getSync());
1282 auto skip
= nanoseconds
{seconds
{mSamplesPos
}} / mCodecCtx
->sample_rate
;
1283 mDeviceStartTime
-= skip
;
1284 mCurrentPts
+= skip
;
1287 while(!mMovie
.mQuit
.load(std::memory_order_relaxed
)
1288 && mConnected
.test_and_set(std::memory_order_relaxed
))
1291 if(mBufferDataSize
> 0)
1293 alGetSourcei(mSource
, AL_SOURCE_STATE
, &state
);
1294 readAudio(getSync());
1298 ALint processed
, queued
;
1300 /* First remove any processed buffers. */
1301 alGetSourcei(mSource
, AL_BUFFERS_PROCESSED
, &processed
);
1302 while(processed
> 0)
1305 alSourceUnqueueBuffers(mSource
, 1, &bid
);
1309 /* Refill the buffer queue. */
1310 int sync_skip
{getSync()};
1311 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
1312 while(static_cast<ALuint
>(queued
) < mBuffers
.size())
1314 /* Read the next chunk of data, filling the buffer, and queue
1317 const bool got_audio
{readAudio(static_cast<uint8_t*>(samples
),
1318 static_cast<ALuint
>(buffer_len
), sync_skip
)};
1319 if(!got_audio
) break;
1321 const ALuint bufid
{mBuffers
[mBufferIdx
]};
1322 mBufferIdx
= static_cast<ALuint
>((mBufferIdx
+1) % mBuffers
.size());
1324 alBufferData(bufid
, mFormat
, samples
, buffer_len
, mCodecCtx
->sample_rate
);
1325 alSourceQueueBuffers(mSource
, 1, &bufid
);
1329 /* Check that the source is playing. */
1330 alGetSourcei(mSource
, AL_SOURCE_STATE
, &state
);
1331 if(state
== AL_STOPPED
)
1333 /* AL_STOPPED means there was an underrun. Clear the buffer
1334 * queue since this likely means we're late, and rewind the
1335 * source to get it back into an AL_INITIAL state.
1337 alSourceRewind(mSource
);
1338 alSourcei(mSource
, AL_BUFFER
, 0);
1339 if(alcGetInteger64vSOFT
)
1341 /* Also update the device start time with the current
1342 * device clock, so the decoder knows we're running behind.
1345 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
1346 ALC_DEVICE_CLOCK_SOFT
, 1, &devtime
);
1347 mDeviceStartTime
= nanoseconds
{devtime
} - mCurrentPts
;
1353 /* (re)start the source if needed, and wait for a buffer to finish */
1354 if(state
!= AL_PLAYING
&& state
!= AL_PAUSED
)
1356 if(!startPlayback())
1359 if(alGetError() != AL_NO_ERROR
)
1362 mSrcCond
.wait_for(srclock
, sleep_time
);
1365 alSourceRewind(mSource
);
1366 alSourcei(mSource
, AL_BUFFER
, 0);
1372 #ifdef AL_SOFT_events
1373 if(alEventControlSOFT
)
1375 alEventControlSOFT(evt_types
.size(), evt_types
.data(), AL_FALSE
);
1376 alEventCallbackSOFT(nullptr, nullptr);
1384 nanoseconds
VideoState::getClock()
1386 /* NOTE: This returns incorrect times while not playing. */
1387 std::lock_guard
<std::mutex
> _
{mDispPtsMutex
};
1388 if(mDisplayPtsTime
== microseconds::min())
1389 return nanoseconds::zero();
1390 auto delta
= get_avtime() - mDisplayPtsTime
;
1391 return mDisplayPts
+ delta
;
/* Called by VideoState::updateVideo to display the next video frame. */
void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
{
    if(!mImage)
        return;

    double aspect_ratio;
    int win_w, win_h;
    int w, h, x, y;

    /* A zero numerator means the stream's sample aspect ratio is unknown. */
    if(mCodecCtx->sample_aspect_ratio.num == 0)
        aspect_ratio = 0.0;
    else
    {
        aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
            mCodecCtx->height;
    }
    /* Fall back to the raw pixel dimensions for a non-positive ratio. */
    if(aspect_ratio <= 0.0)
        aspect_ratio = static_cast<double>(mCodecCtx->width) / mCodecCtx->height;

    SDL_GetWindowSize(screen, &win_w, &win_h);
    /* Fit the image to the window height first; "+3 & ~3" rounds the scaled
     * dimension up to a multiple of 4. */
    h = win_h;
    w = (static_cast<int>(std::rint(h * aspect_ratio)) + 3) & ~3;
    if(w > win_w)
    {
        /* Too wide for the window; fit to the width instead. */
        w = win_w;
        h = (static_cast<int>(std::rint(w / aspect_ratio)) + 3) & ~3;
    }
    /* Center the image within the window. */
    x = (win_w - w) / 2;
    y = (win_h - h) / 2;

    SDL_Rect src_rect{ 0, 0, mWidth, mHeight };
    SDL_Rect dst_rect{ x, y, w, h };
    SDL_RenderCopy(renderer, mImage, &src_rect, &dst_rect);
    SDL_RenderPresent(renderer);
}
/* Called regularly on the main thread where the SDL_Renderer was created. It
 * handles updating the textures of decoded frames and displaying the latest
 * frame.
 */
void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw)
{
    size_t read_idx{mPictQRead.load(std::memory_order_relaxed)};
    Picture *vp{&mPictQ[read_idx]};

    auto clocktime = mMovie.getMasterClock();
    bool updated{false};
    /* Advance through the picture queue, dropping frames whose PTS has
     * already passed the master clock; stop at the newest due frame. */
    while(1)
    {
        size_t next_idx{(read_idx+1)%mPictQ.size()};
        if(next_idx == mPictQWrite.load(std::memory_order_acquire))
            break;
        Picture *nextvp{&mPictQ[next_idx]};
        if(clocktime < nextvp->mPts)
            break;

        vp = nextvp;
        updated = true;
        read_idx = next_idx;
    }
    if(mMovie.mQuit.load(std::memory_order_relaxed))
    {
        /* Shutting down: release the read position and wake the decoder so it
         * can exit. */
        if(mEOS)
            mFinalUpdate = true;
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();
        return;
    }

    if(updated)
    {
        /* Publish the new read position and wake the decoder thread, which
         * may be waiting for free queue slots. */
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();

        /* allocate or resize the buffer! */
        bool fmt_updated{false};
        if(!mImage || mWidth != mCodecCtx->width || mHeight != mCodecCtx->height)
        {
            fmt_updated = true;
            if(mImage)
                SDL_DestroyTexture(mImage);
            /* The texture uses the codec's coded (padded) size; the visible
             * region is cropped at display time via src_rect. */
            mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                mCodecCtx->coded_width, mCodecCtx->coded_height);
            if(!mImage)
                std::cerr<< "Failed to create YV12 texture!" <<std::endl;
            mWidth = mCodecCtx->width;
            mHeight = mCodecCtx->height;

            if(mFirstUpdate && mWidth > 0 && mHeight > 0)
            {
                /* For the first update, set the window size to the video size. */
                mFirstUpdate = false;

                int w{mWidth}, h{mHeight};
                if(mCodecCtx->sample_aspect_ratio.den != 0)
                {
                    double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
                    if(aspect_ratio >= 1.0)
                        w = static_cast<int>(w*aspect_ratio + 0.5);
                    else if(aspect_ratio > 0.0)
                        h = static_cast<int>(h/aspect_ratio + 0.5);
                }
                SDL_SetWindowSize(screen, w, h);
            }
        }

        if(mImage)
        {
            AVFrame *frame{vp->mFrame.get()};
            void *pixels{nullptr};
            int pitch{0};

            /* YUV420P planes can be uploaded directly; any other pixel format
             * goes through swscale into the locked texture. */
            if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
                SDL_UpdateYUVTexture(mImage, nullptr,
                    frame->data[0], frame->linesize[0],
                    frame->data[1], frame->linesize[1],
                    frame->data[2], frame->linesize[2]
                );
            else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0)
                std::cerr<< "Failed to lock texture" <<std::endl;
            else
            {
                // Convert the image into YUV format that SDL uses
                int coded_w{mCodecCtx->coded_width};
                int coded_h{mCodecCtx->coded_height};
                int w{mCodecCtx->width};
                int h{mCodecCtx->height};
                if(!mSwscaleCtx || fmt_updated)
                {
                    mSwscaleCtx.reset(sws_getContext(
                        w, h, mCodecCtx->pix_fmt,
                        w, h, AV_PIX_FMT_YUV420P, 0,
                        nullptr, nullptr, nullptr));
                }

                /* point pict at the queue */
                uint8_t *pict_data[3];
                pict_data[0] = static_cast<uint8_t*>(pixels);
                /* The U plane follows the full-size Y plane; the V plane
                 * follows the quarter-size U plane. */
                pict_data[1] = pict_data[0] + coded_w*coded_h;
                pict_data[2] = pict_data[1] + coded_w*coded_h/4;

                int pict_linesize[3];
                pict_linesize[0] = pitch;
                pict_linesize[1] = pitch / 2;
                pict_linesize[2] = pitch / 2;

                sws_scale(mSwscaleCtx.get(), reinterpret_cast<uint8_t**>(frame->data), frame->linesize,
                    0, h, pict_data, pict_linesize);
                SDL_UnlockTexture(mImage);
            }

            redraw = true;
        }
    }

    if(redraw)
    {
        /* Show the picture! */
        display(screen, renderer);
    }

    if(updated)
    {
        /* Record when this PTS was put on screen, for getClock(). */
        auto disp_time = get_avtime();

        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPts = vp->mPts;
        mDisplayPtsTime = disp_time;
    }
    if(mEOS.load(std::memory_order_acquire))
    {
        /* The stream ended; once the queue is drained, signal the final
         * update so waiters (parse_handler) can finish. */
        if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire))
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>{mPictQMutex}.unlock();
            mPictQCond.notify_one();
        }
    }
}
/* Video decoding thread: receives frames from the codec, timestamps them, and
 * pushes them onto the picture queue for the main thread to display.
 */
int VideoState::handler()
{
    /* Pre-allocate one AVFrame per picture-queue slot. */
    std::for_each(mPictQ.begin(), mPictQ.end(),
        [](Picture &pict) -> void
        { pict.mFrame = AVFramePtr{av_frame_alloc()}; });

    /* Prefill the codec buffer. */
    do {
        const int ret{mPackets.sendTo(mCodecCtx.get())};
        if(ret == AVERROR(EAGAIN) || ret == AVErrorEOF)
            break;
    } while(1);

    {
        /* Mark the start of display timing now that decoding is ready. */
        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPtsTime = get_avtime();
    }

    auto current_pts = nanoseconds::zero();
    while(!mMovie.mQuit.load(std::memory_order_relaxed))
    {
        size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)};
        Picture *vp{&mPictQ[write_idx]};

        /* Retrieve video frame. */
        AVFrame *decoded_frame{vp->mFrame.get()};
        int ret;
        /* Feed more packets while the decoder wants input. */
        while((ret=avcodec_receive_frame(mCodecCtx.get(), decoded_frame)) == AVERROR(EAGAIN))
            mPackets.sendTo(mCodecCtx.get());
        if(ret != 0)
        {
            if(ret == AVErrorEOF) break;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
            continue;
        }

        /* Get the PTS for this frame. */
        if(decoded_frame->best_effort_timestamp != AVNoPtsValue)
            current_pts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
                static_cast<double>(decoded_frame->best_effort_timestamp)});
        vp->mPts = current_pts;

        /* Update the video clock to the next expected PTS. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        /* repeat_pict extends the frame's duration by half-frame increments. */
        frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
        current_pts += duration_cast<nanoseconds>(seconds_d64{frame_delay});

        /* Put the frame in the queue to be loaded into a texture and displayed
         * by the rendering thread.
         */
        write_idx = (write_idx+1)%mPictQ.size();
        mPictQWrite.store(write_idx, std::memory_order_release);

        /* Send a packet now so it's hopefully ready by the time it's needed. */
        mPackets.sendTo(mCodecCtx.get());

        if(write_idx == mPictQRead.load(std::memory_order_acquire))
        {
            /* Wait until we have space for a new pic */
            std::unique_lock<std::mutex> lock{mPictQMutex};
            while(write_idx == mPictQRead.load(std::memory_order_acquire) &&
                !mMovie.mQuit.load(std::memory_order_relaxed))
                mPictQCond.wait(lock);
        }
    }
    /* Decoding finished; wait for the display side's final update before
     * returning so the queue isn't torn down mid-use. */
    mEOS = true;

    std::unique_lock<std::mutex> lock{mPictQMutex};
    while(!mFinalUpdate) mPictQCond.wait(lock);

    return 0;
}
1653 int MovieState::decode_interrupt_cb(void *ctx
)
1655 return static_cast<MovieState
*>(ctx
)->mQuit
.load(std::memory_order_relaxed
);
/* Opens the movie file, probes its streams, and launches the parser thread.
 * Returns false (after printing a diagnostic) on any failure.
 */
bool MovieState::prepare()
{
    AVIOContext *avioctx{nullptr};
    /* The interrupt callback lets decode_interrupt_cb abort blocking I/O. */
    AVIOInterruptCB intcb{decode_interrupt_cb, this};
    if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mIOContext.reset(avioctx);

    /* Open movie file. If avformat_open_input fails it will automatically free
     * this context, so don't set it onto a smart pointer yet.
     */
    AVFormatContext *fmtctx{avformat_alloc_context()};
    fmtctx->pb = mIOContext.get();
    fmtctx->interrupt_callback = intcb;
    if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mFormatCtx.reset(fmtctx);

    /* Retrieve stream information */
    if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
    {
        std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
        return false;
    }

    /* Dump information about file onto standard error */
    av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);

    mParseThread = std::thread{std::mem_fn(&MovieState::parse_handler), this};
    return true;
}
1696 void MovieState::setTitle(SDL_Window
*window
)
1698 auto pos1
= mFilename
.rfind('/');
1699 auto pos2
= mFilename
.rfind('\\');
1700 auto fpos
= ((pos1
== std::string::npos
) ? pos2
:
1701 (pos2
== std::string::npos
) ? pos1
:
1702 std::max(pos1
, pos2
)) + 1;
1703 SDL_SetWindowTitle(window
, (mFilename
.substr(fpos
)+" - "+AppName
).c_str());
1706 nanoseconds
MovieState::getClock()
1708 if(mClockBase
== microseconds::min())
1709 return nanoseconds::zero();
1710 return get_avtime() - mClockBase
;
/* Returns the clock of whichever stream drives A/V sync: video, audio, or the
 * external (wall) clock when neither is the master.
 */
nanoseconds MovieState::getMasterClock()
{
    if(mAVSyncType == SyncMaster::Video)
        return mVideo.getClock();
    if(mAVSyncType == SyncMaster::Audio)
        return mAudio.getClock();
    return getClock();
}
1722 nanoseconds
MovieState::getDuration()
1723 { return std::chrono::duration
<int64_t,std::ratio
<1,AV_TIME_BASE
>>(mFormatCtx
->duration
); }
/* Opens the decoder for the given stream and hands the stream/codec context
 * to the matching media handler (audio or video). Returns the stream index on
 * success, -1 on any failure or unsupported media type.
 */
int MovieState::streamComponentOpen(unsigned int stream_index)
{
    if(stream_index >= mFormatCtx->nb_streams)
        return -1;

    /* Get a pointer to the codec context for the stream, and open the
     * associated codec.
     */
    AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)};
    if(!avctx) return -1;

    if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
        return -1;

    AVCodec *codec{avcodec_find_decoder(avctx->codec_id)};
    if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
    {
        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
            << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        return -1;
    }

    /* Initialize and start the media type handler */
    switch(avctx->codec_type)
    {
        case AVMEDIA_TYPE_AUDIO:
            mAudio.mStream = mFormatCtx->streams[stream_index];
            mAudio.mCodecCtx = std::move(avctx);
            break;

        case AVMEDIA_TYPE_VIDEO:
            mVideo.mStream = mFormatCtx->streams[stream_index];
            mVideo.mCodecCtx = std::move(avctx);
            break;

        default:
            return -1;
    }

    return static_cast<int>(stream_index);
}
/* Parser thread: opens the A/V stream components, spawns their handler
 * threads, then reads packets from the container and dispatches each to the
 * queue of the stream it belongs to until EOF or quit.
 */
int MovieState::parse_handler()
{
    auto &audio_queue = mAudio.mPackets;
    auto &video_queue = mVideo.mPackets;

    int video_index{-1};
    int audio_index{-1};

    /* Find the first video and audio streams */
    for(unsigned int i{0u};i < mFormatCtx->nb_streams;i++)
    {
        auto codecpar = mFormatCtx->streams[i]->codecpar;
        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0)
            video_index = streamComponentOpen(i);
        else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = streamComponentOpen(i);
    }

    if(video_index < 0 && audio_index < 0)
    {
        std::cerr<< mFilename<<": could not open codecs" <<std::endl;
        mQuit = true;
    }

    /* Set the base time 750ms ahead of the current av time. */
    mClockBase = get_avtime() + milliseconds{750};

    if(audio_index >= 0)
        mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio};
    if(video_index >= 0)
        mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo};

    /* Main packet reading/dispatching loop */
    while(!mQuit.load(std::memory_order_relaxed))
    {
        AVPacket packet;
        if(av_read_frame(mFormatCtx.get(), &packet) < 0)
            break;

        /* Copy the packet into the queue it's meant for. */
        if(packet.stream_index == video_index)
        {
            /* If the queue is full, back off and retry until quit. */
            while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(&packet))
                std::this_thread::sleep_for(milliseconds{100});
        }
        else if(packet.stream_index == audio_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(&packet))
                std::this_thread::sleep_for(milliseconds{100});
        }

        av_packet_unref(&packet);
    }
    /* Finish the queues so the receivers know nothing more is coming. */
    if(mVideo.mCodecCtx) video_queue.setFinished();
    if(mAudio.mCodecCtx) audio_queue.setFinished();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    /* Wait for the video side's final display update before announcing the
     * movie is done. */
    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock{mVideo.mPictQMutex};
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    /* Tell the main event loop this movie finished. */
    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}
// Helper class+method to print the time with human-readable formatting.
/* Streams `rhs` as "[-][Hh MM'm']SS's'", zero-padding minutes/seconds to two
 * digits. Only handles formatting up to hours.
 */
std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;

    seconds t{rhs.mTime};
    if(t.count() < 0)
    {
        /* Print a leading '-' and format the magnitude. */
        os << '-';
        t = -t;
    }

    // Only handle up to hour formatting
    if(t >= hours{1})
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
           << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    /* Restore the stream's default fill/width after the padded seconds. */
    os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
       << std::setfill(' ');
    return os;
}
1874 int main(int argc
, char *argv
[])
1876 std::unique_ptr
<MovieState
> movState
;
1880 std::cerr
<< "Usage: "<<argv
[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl
;
1883 /* Register all formats and codecs */
1884 #if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100))
1887 /* Initialize networking protocols */
1888 avformat_network_init();
1890 if(SDL_Init(SDL_INIT_VIDEO
| SDL_INIT_EVENTS
))
1892 std::cerr
<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl
;
1896 /* Make a window to put our video */
1897 SDL_Window
*screen
{SDL_CreateWindow(AppName
.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE
)};
1900 std::cerr
<< "SDL: could not set video mode - exiting" <<std::endl
;
1903 /* Make a renderer to handle the texture image surface and rendering. */
1904 Uint32 render_flags
{SDL_RENDERER_ACCELERATED
| SDL_RENDERER_PRESENTVSYNC
};
1905 SDL_Renderer
*renderer
{SDL_CreateRenderer(screen
, -1, render_flags
)};
1908 SDL_RendererInfo rinf
{};
1911 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1912 * software renderer. */
1913 if(SDL_GetRendererInfo(renderer
, &rinf
) == 0)
1915 for(Uint32 i
{0u};!ok
&& i
< rinf
.num_texture_formats
;i
++)
1916 ok
= (rinf
.texture_formats
[i
] == SDL_PIXELFORMAT_IYUV
);
1920 std::cerr
<< "IYUV pixelformat textures not supported on renderer "<<rinf
.name
<<std::endl
;
1921 SDL_DestroyRenderer(renderer
);
1927 render_flags
= SDL_RENDERER_SOFTWARE
| SDL_RENDERER_PRESENTVSYNC
;
1928 renderer
= SDL_CreateRenderer(screen
, -1, render_flags
);
1932 std::cerr
<< "SDL: could not create renderer - exiting" <<std::endl
;
1935 SDL_SetRenderDrawColor(renderer
, 0, 0, 0, 255);
1936 SDL_RenderFillRect(renderer
, nullptr);
1937 SDL_RenderPresent(renderer
);
1939 /* Open an audio device */
1941 if(InitAL(&argv
, &argc
))
1943 std::cerr
<< "Failed to set up audio device" <<std::endl
;
1948 auto device
= alcGetContextsDevice(alcGetCurrentContext());
1949 if(alcIsExtensionPresent(device
, "ALC_SOFT_device_clock"))
1951 std::cout
<< "Found ALC_SOFT_device_clock" <<std::endl
;
1952 alcGetInteger64vSOFT
= reinterpret_cast<LPALCGETINTEGER64VSOFT
>(
1953 alcGetProcAddress(device
, "alcGetInteger64vSOFT")
1958 if(alIsExtensionPresent("AL_SOFT_source_latency"))
1960 std::cout
<< "Found AL_SOFT_source_latency" <<std::endl
;
1961 alGetSourcei64vSOFT
= reinterpret_cast<LPALGETSOURCEI64VSOFT
>(
1962 alGetProcAddress("alGetSourcei64vSOFT")
1965 #ifdef AL_SOFT_events
1966 if(alIsExtensionPresent("AL_SOFTX_events"))
1968 std::cout
<< "Found AL_SOFT_events" <<std::endl
;
1969 alEventControlSOFT
= reinterpret_cast<LPALEVENTCONTROLSOFT
>(
1970 alGetProcAddress("alEventControlSOFT"));
1971 alEventCallbackSOFT
= reinterpret_cast<LPALEVENTCALLBACKSOFT
>(
1972 alGetProcAddress("alEventCallbackSOFT"));
1975 #ifdef AL_SOFT_callback_buffer
1976 if(alIsExtensionPresent("AL_SOFTX_callback_buffer"))
1978 std::cout
<< "Found AL_SOFT_callback_buffer" <<std::endl
;
1979 alBufferCallbackSOFT
= reinterpret_cast<LPALBUFFERCALLBACKSOFT
>(
1980 alGetProcAddress("alBufferCallbackSOFT"));
1985 for(;fileidx
< argc
;++fileidx
)
1987 if(strcmp(argv
[fileidx
], "-direct") == 0)
1989 if(alIsExtensionPresent("AL_SOFT_direct_channels_remix"))
1991 std::cout
<< "Found AL_SOFT_direct_channels_remix" <<std::endl
;
1992 DirectOutMode
= AL_REMIX_UNMATCHED_SOFT
;
1994 else if(alIsExtensionPresent("AL_SOFT_direct_channels"))
1996 std::cout
<< "Found AL_SOFT_direct_channels" <<std::endl
;
1997 DirectOutMode
= AL_DROP_UNMATCHED_SOFT
;
2000 std::cerr
<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl
;
2002 else if(strcmp(argv
[fileidx
], "-wide") == 0)
2004 if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
2005 std::cerr
<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl
;
2008 std::cout
<< "Found AL_EXT_STEREO_ANGLES" <<std::endl
;
2009 EnableWideStereo
= true;
2012 else if(strcmp(argv
[fileidx
], "-novideo") == 0)
2013 DisableVideo
= true;
2018 while(fileidx
< argc
&& !movState
)
2020 movState
= std::unique_ptr
<MovieState
>{new MovieState
{argv
[fileidx
++]}};
2021 if(!movState
->prepare()) movState
= nullptr;
2025 std::cerr
<< "Could not start a video" <<std::endl
;
2028 movState
->setTitle(screen
);
2030 /* Default to going to the next movie at the end of one. */
2031 enum class EomAction
{
2033 } eom_action
{EomAction::Next
};
2034 seconds last_time
{seconds::min()};
2038 int have_evt
{SDL_WaitEventTimeout(&event
, 10)};
2040 auto cur_time
= std::chrono::duration_cast
<seconds
>(movState
->getMasterClock());
2041 if(cur_time
!= last_time
)
2043 auto end_time
= std::chrono::duration_cast
<seconds
>(movState
->getDuration());
2044 std::cout
<< " \r "<<PrettyTime
{cur_time
}<<" / "<<PrettyTime
{end_time
} <<std::flush
;
2045 last_time
= cur_time
;
2048 bool force_redraw
{false};
2053 switch(event
.key
.keysym
.sym
)
2056 movState
->mQuit
= true;
2057 eom_action
= EomAction::Quit
;
2061 movState
->mQuit
= true;
2062 eom_action
= EomAction::Next
;
2070 case SDL_WINDOWEVENT
:
2071 switch(event
.window
.event
)
2073 case SDL_WINDOWEVENT_RESIZED
:
2074 SDL_SetRenderDrawColor(renderer
, 0, 0, 0, 255);
2075 SDL_RenderFillRect(renderer
, nullptr);
2076 force_redraw
= true;
2079 case SDL_WINDOWEVENT_EXPOSED
:
2080 force_redraw
= true;
2089 movState
->mQuit
= true;
2090 eom_action
= EomAction::Quit
;
2093 case FF_MOVIE_DONE_EVENT
:
2095 last_time
= seconds::min();
2096 if(eom_action
!= EomAction::Quit
)
2099 while(fileidx
< argc
&& !movState
)
2101 movState
= std::unique_ptr
<MovieState
>{new MovieState
{argv
[fileidx
++]}};
2102 if(!movState
->prepare()) movState
= nullptr;
2106 movState
->setTitle(screen
);
2111 /* Nothing more to play. Shut everything down and quit. */
2116 SDL_DestroyRenderer(renderer
);
2118 SDL_DestroyWindow(screen
);
2127 } while(SDL_PollEvent(&event
));
2129 movState
->mVideo
.updateVideo(screen
, renderer
, force_redraw
);
2132 std::cerr
<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl
;