Update alffplay's command line message
[openal-soft.git] / examples / alffplay.cpp
blob77fc148ffbb9efd82e03a16f2fc03d51cacb7274
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
4 * Requires C++11.
5 */
7 #include <condition_variable>
8 #include <functional>
9 #include <algorithm>
10 #include <iostream>
11 #include <iomanip>
12 #include <cstring>
13 #include <limits>
14 #include <thread>
15 #include <chrono>
16 #include <atomic>
17 #include <mutex>
18 #include <deque>
20 extern "C" {
21 #include "libavcodec/avcodec.h"
22 #include "libavformat/avformat.h"
23 #include "libavformat/avio.h"
24 #include "libavutil/time.h"
25 #include "libavutil/pixfmt.h"
26 #include "libavutil/avstring.h"
27 #include "libavutil/channel_layout.h"
28 #include "libswscale/swscale.h"
29 #include "libswresample/swresample.h"
32 #include "SDL.h"
34 #include "AL/alc.h"
35 #include "AL/al.h"
36 #include "AL/alext.h"
namespace
{

/* Application name, used in the window title. */
static const std::string AppName("alffplay");

/* When true, AL_DIRECT_CHANNELS_SOFT is requested on the source (set before
 * playback starts). */
static bool do_direct_out = false;
/* True when AL_SEC_OFFSET_LATENCY_SOFT queries are available (guards use of
 * alGetSourcedvSOFT in AudioState::getClock). */
static bool has_latency_check = false;
static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;

#define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
#define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
#define AV_SYNC_THRESHOLD 0.01      /* Seconds; minimum skip/repeat threshold */
#define AV_NOSYNC_THRESHOLD 10.0    /* Seconds; beyond this, give up syncing */
#define SAMPLE_CORRECTION_MAX_DIFF 0.05 /* Max audio correction per update, in seconds */
#define AUDIO_DIFF_AVG_NB 20        /* Number of measures in the clock-diff average */
#define VIDEO_PICTURE_QUEUE_SIZE 16

/* Custom SDL events posted to the main thread's event loop. */
enum {
    FF_UPDATE_EVENT = SDL_USEREVENT, /* A picture's texture needs updating */
    FF_REFRESH_EVENT,                /* Time to display the next picture */
    FF_MOVIE_DONE_EVENT              /* Playback finished */
};

/* Which clock drives A/V synchronization. */
enum {
    AV_SYNC_AUDIO_MASTER,
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_MASTER,

    DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER
};
/* A thread-safe FIFO of demuxed AVPackets. The parser thread put()s packets
 * in; the audio/video decoder threads peek()/pop() them out. mTotalSize
 * tracks the compressed bytes queued, and finish() marks end of stream so
 * waiting readers can drain and exit.
 */
struct PacketQueue {
    std::deque<AVPacket> mPackets;
    std::atomic<int> mTotalSize;  /* Total compressed bytes currently queued */
    std::atomic<bool> mFinished;  /* Set once no more packets will be put */
    std::mutex mMutex;
    std::condition_variable mCond;

    PacketQueue() : mTotalSize(0), mFinished(false)
    { }
    ~PacketQueue()
    { clear(); }

    /* Appends a ref to *pkt; returns 0 on success, -1 on failure. */
    int put(const AVPacket *pkt);
    /* Refs the head packet into *pkt, waiting if empty. Returns 1 on
     * success, 0 at end of stream, -1 on error/quit. */
    int peek(AVPacket *pkt, std::atomic<bool> &quit_var);
    /* Unrefs and removes the head packet. */
    void pop();

    void clear();
    void finish();
};
92 struct MovieState;
/* Per-movie audio state: decodes the audio stream, converts samples with
 * swresample, and feeds them to a streaming OpenAL source. handler() is the
 * audio thread's entry point.
 */
struct AudioState {
    MovieState *mMovie; /* Non-owning back-pointer to the parent movie */

    AVStream *mStream;
    AVCodecContext *mCodecCtx; /* Owned; freed in the destructor */

    PacketQueue mQueue;

    /* Used for clock difference average computation */
    struct {
        std::atomic<int> Clocks; /* In microseconds */
        double Accum;
        double AvgCoeff;
        double Threshold;
        int AvgCount;
    } mDiff;

    /* Time (in seconds) of the next sample to be buffered */
    double mCurrentPts;

    /* Decompressed sample frame, and swresample context for conversion */
    AVFrame *mDecodedFrame;
    struct SwrContext *mSwresCtx;

    /* Conversion format, for what gets fed to Alure */
    int mDstChanLayout;
    enum AVSampleFormat mDstSampleFmt;

    /* Storage of converted samples */
    uint8_t *mSamples;
    int mSamplesLen; /* In samples */
    int mSamplesPos; /* Negative values mean samples to duplicate (see readAudio) */
    int mSamplesMax; /* Allocated capacity of mSamples, in samples */

    /* OpenAL format */
    ALenum mFormat;
    ALsizei mFrameSize; /* Bytes per sample frame of the converted output */

    /* Recursive: getClock() may be called while the handler holds the lock. */
    std::recursive_mutex mSrcMutex;
    ALuint mSource;
    ALuint mBuffers[AUDIO_BUFFER_QUEUE_SIZE];
    ALsizei mBufferIdx; /* Next buffer to (re)fill, round-robin */

    AudioState(MovieState *movie)
      : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr)
      , mDiff{{0}, 0.0, 0.0, 0.0, 0}, mCurrentPts(0.0), mDecodedFrame(nullptr)
      , mSwresCtx(nullptr), mDstChanLayout(0), mDstSampleFmt(AV_SAMPLE_FMT_NONE)
      , mSamples(nullptr), mSamplesLen(0), mSamplesPos(0), mSamplesMax(0)
      , mFormat(AL_NONE), mFrameSize(0), mSource(0), mBufferIdx(0)
    {
        for(auto &buf : mBuffers)
            buf = 0;
    }
    ~AudioState()
    {
        if(mSource)
            alDeleteSources(1, &mSource);
        /* Buffer IDs are zero-initialized; deleting ID 0 is a legal no-op. */
        alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);

        av_frame_free(&mDecodedFrame);
        swr_free(&mSwresCtx);

        av_freep(&mSamples);

        avcodec_free_context(&mCodecCtx);
    }

    /* Returns the current audio clock (seconds of audio heard so far). */
    double getClock();

    /* Returns the number of sample frames to skip (+) or repeat (-). */
    int getSync();
    /* Decodes and converts one frame; returns converted frame count. */
    int decodeFrame();
    /* Fills 'samples' with up to 'length' bytes of output audio. */
    int readAudio(uint8_t *samples, int length);

    /* Audio thread entry point. */
    int handler();
};
/* Per-movie video state: decodes the video stream and hands pictures to the
 * main thread for display through a small ring of SDL textures. handler() is
 * the video thread's entry point; display/refresh/update methods run on the
 * main (SDL) thread.
 */
struct VideoState {
    MovieState *mMovie; /* Non-owning back-pointer to the parent movie */

    AVStream *mStream;
    AVCodecContext *mCodecCtx; /* Owned; freed in the destructor */

    PacketQueue mQueue;

    double mClock;          /* Running video clock (seconds) */
    double mFrameTimer;     /* Wall-clock time the next frame is due */
    double mFrameLastPts;
    double mFrameLastDelay;
    double mCurrentPts;
    /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
    int64_t mCurrentPtsTime;

    /* Decompressed video frame, and swscale context for conversion */
    AVFrame *mDecodedFrame;
    struct SwsContext *mSwscaleCtx;

    /* One slot of the picture ring buffer. */
    struct Picture {
        SDL_Texture *mImage;
        int mWidth, mHeight; /* Logical image size (actual size may be larger) */
        std::atomic<bool> mUpdated; /* Set by the main thread once the texture holds the frame */
        double mPts;

        Picture()
          : mImage(nullptr), mWidth(0), mHeight(0), mUpdated(false), mPts(0.0)
        { }
        ~Picture()
        {
            if(mImage)
                SDL_DestroyTexture(mImage);
            mImage = nullptr;
        }
    };
    std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
    size_t mPictQSize, mPictQRead, mPictQWrite; /* Ring-buffer fill count and indices */
    std::mutex mPictQMutex;
    std::condition_variable mPictQCond;
    bool mFirstUpdate;              /* True until the window is sized to the video */
    std::atomic<bool> mEOS;         /* Decoder reached end of stream */
    std::atomic<bool> mFinalUpdate; /* Display side acknowledged EOS; handler may exit */

    VideoState(MovieState *movie)
      : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr), mClock(0.0)
      , mFrameTimer(0.0), mFrameLastPts(0.0), mFrameLastDelay(0.0)
      , mCurrentPts(0.0), mCurrentPtsTime(0), mDecodedFrame(nullptr)
      , mSwscaleCtx(nullptr), mPictQSize(0), mPictQRead(0), mPictQWrite(0)
      , mFirstUpdate(true), mEOS(false), mFinalUpdate(false)
    { }
    ~VideoState()
    {
        sws_freeContext(mSwscaleCtx);
        mSwscaleCtx = nullptr;
        av_frame_free(&mDecodedFrame);
        avcodec_free_context(&mCodecCtx);
    }

    /* Returns the current video clock (seconds). */
    double getClock();

    static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
    void schedRefresh(int delay);
    void display(SDL_Window *screen, SDL_Renderer *renderer);
    void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
    void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
    int queuePicture(double pts);
    double synchronize(double pts);
    int handler();
};
/* Top-level playback state for one movie file: owns the demuxer context, the
 * audio/video sub-states, and the worker threads.
 */
struct MovieState {
    AVFormatContext *mFormatCtx; /* Owned; closed in the destructor */
    int mVideoStream, mAudioStream; /* Stream indices; -1 once parse_handler runs and none found */

    int mAVSyncType; /* One of the AV_SYNC_* clock-master values */

    int64_t mExternalClockBase; /* av_gettime() value at playback start, in microseconds */

    std::atomic<bool> mQuit; /* Signals all threads to stop */

    AudioState mAudio;
    VideoState mVideo;

    std::thread mParseThread;
    std::thread mAudioThread;
    std::thread mVideoThread;

    std::string mFilename;

    MovieState(std::string fname)
      : mFormatCtx(nullptr), mVideoStream(0), mAudioStream(0)
      , mAVSyncType(DEFAULT_AV_SYNC_TYPE), mExternalClockBase(0), mQuit(false)
      , mAudio(this), mVideo(this), mFilename(std::move(fname))
    { }
    ~MovieState()
    {
        mQuit = true;
        if(mParseThread.joinable())
            mParseThread.join();
        /* NOTE(review): mAudioThread/mVideoThread are not joined here —
         * presumably parse_handler joins them before exiting; verify. */
        avformat_close_input(&mFormatCtx);
    }

    /* FFmpeg interrupt callback; non-zero aborts blocking I/O. */
    static int decode_interrupt_cb(void *ctx);
    /* Opens the file, finds streams, and starts the parser thread. */
    bool prepare();
    void setTitle(SDL_Window *window);

    /* The external (wall-time) clock, in seconds. */
    double getClock();

    /* The clock selected by mAVSyncType. */
    double getMasterClock();

    int streamComponentOpen(int stream_index);
    int parse_handler();
};
286 int PacketQueue::put(const AVPacket *pkt)
288 std::unique_lock<std::mutex> lock(mMutex);
289 mPackets.push_back(AVPacket{});
290 if(av_packet_ref(&mPackets.back(), pkt) != 0)
292 mPackets.pop_back();
293 return -1;
295 mTotalSize += mPackets.back().size;
296 lock.unlock();
298 mCond.notify_one();
299 return 0;
302 int PacketQueue::peek(AVPacket *pkt, std::atomic<bool> &quit_var)
304 std::unique_lock<std::mutex> lock(mMutex);
305 while(!quit_var.load())
307 if(!mPackets.empty())
309 if(av_packet_ref(pkt, &mPackets.front()) != 0)
310 return -1;
311 return 1;
314 if(mFinished.load())
315 return 0;
316 mCond.wait(lock);
318 return -1;
321 void PacketQueue::pop()
323 std::unique_lock<std::mutex> lock(mMutex);
324 AVPacket *pkt = &mPackets.front();
325 mTotalSize -= pkt->size;
326 av_packet_unref(pkt);
327 mPackets.pop_front();
330 void PacketQueue::clear()
332 std::unique_lock<std::mutex> lock(mMutex);
333 std::for_each(mPackets.begin(), mPackets.end(),
334 [](AVPacket &pkt) { av_packet_unref(&pkt); }
336 mPackets.clear();
337 mTotalSize = 0;
339 void PacketQueue::finish()
341 std::unique_lock<std::mutex> lock(mMutex);
342 mFinished = true;
343 lock.unlock();
344 mCond.notify_all();
/* Returns the timestamp, in seconds, of the audio sample currently being
 * heard at the DAC (clamped to 0).
 */
double AudioState::getClock()
{
    double pts;

    std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
    /* The audio clock is the timestamp of the sample currently being heard.
     * It's based on 4 components:
     * 1 - The timestamp of the next sample to buffer (state->current_pts)
     * 2 - The length of the source's buffer queue
     * 3 - The offset OpenAL is currently at in the source (the first value
     *     from AL_SEC_OFFSET_LATENCY_SOFT)
     * 4 - The latency between OpenAL and the DAC (the second value from
     *     AL_SEC_OFFSET_LATENCY_SOFT)
     *
     * Subtracting the length of the source queue from the next sample's
     * timestamp gives the timestamp of the sample at start of the source
     * queue. Adding the source offset to that results in the timestamp for
     * OpenAL's current position, and subtracting the source latency from that
     * gives the timestamp of the sample currently at the DAC.
     */
    pts = mCurrentPts;
    if(mSource)
    {
        ALdouble offset[2];
        ALint queue_size;
        ALint status;

        /* NOTE: The source state must be checked last, in case an underrun
         * occurs and the source stops between retrieving the offset+latency
         * and getting the state. */
        if(has_latency_check)
        {
            alGetSourcedvSOFT(mSource, AL_SEC_OFFSET_LATENCY_SOFT, offset);
            alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
        }
        else
        {
            /* No latency extension; derive the offset from the sample
             * position and assume zero device latency. */
            ALint ioffset;
            alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
            alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
            offset[0] = (double)ioffset / (double)mCodecCtx->sample_rate;
            offset[1] = 0.0f;
        }
        alGetSourcei(mSource, AL_SOURCE_STATE, &status);

        /* If the source is AL_STOPPED, then there was an underrun and all
         * buffers are processed, so ignore the source queue. The audio thread
         * will put the source into an AL_INITIAL state and clear the queue
         * when it starts recovery. */
        if(status != AL_STOPPED)
            pts -= queue_size*((double)AUDIO_BUFFER_TIME/1000.0) - offset[0];
        if(status == AL_PLAYING)
            pts -= offset[1];
    }
    lock.unlock();

    return std::max(pts, 0.0);
}
407 int AudioState::getSync()
409 double diff, avg_diff, ref_clock;
411 if(mMovie->mAVSyncType == AV_SYNC_AUDIO_MASTER)
412 return 0;
414 ref_clock = mMovie->getMasterClock();
415 diff = ref_clock - getClock();
417 if(!(fabs(diff) < AV_NOSYNC_THRESHOLD))
419 /* Difference is TOO big; reset diff stuff */
420 mDiff.Accum = 0.0;
421 return 0;
424 /* Accumulate the diffs */
425 mDiff.Accum = mDiff.Accum*mDiff.AvgCoeff + diff;
426 avg_diff = mDiff.Accum*(1.0 - mDiff.AvgCoeff);
427 if(fabs(avg_diff) < mDiff.Threshold)
428 return 0;
430 /* Constrain the per-update difference to avoid exceedingly large skips */
431 if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
432 diff = SAMPLE_CORRECTION_MAX_DIFF;
433 else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
434 diff = -SAMPLE_CORRECTION_MAX_DIFF;
435 return (int)(diff*mCodecCtx->sample_rate);
/* Decodes the next audio frame from the packet queue and converts it into
 * mSamples using the swresample context. Returns the number of sample frames
 * converted, 0 on decode failure/EOF, or -1 when quitting or out of packets.
 */
int AudioState::decodeFrame()
{
    while(!mMovie->mQuit.load())
    {
        /* Inner loop: feed packets to the decoder until it has data to give
         * back (send returns EAGAIN) or accepts one cleanly. */
        while(!mMovie->mQuit.load())
        {
            /* Get the next packet */
            AVPacket pkt{};
            if(mQueue.peek(&pkt, mMovie->mQuit) <= 0)
                return -1;

            int ret = avcodec_send_packet(mCodecCtx, &pkt);
            if(ret != AVERROR(EAGAIN))
            {
                if(ret < 0)
                    std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
                /* The packet was consumed (or rejected); drop it either way. */
                mQueue.pop();
            }
            av_packet_unref(&pkt);
            if(ret == 0 || ret == AVERROR(EAGAIN))
                break;
        }

        int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
        if(ret == AVERROR(EAGAIN))
            continue; /* Decoder wants more input; send another packet. */
        if(ret == AVERROR_EOF || ret < 0)
        {
            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
            return 0;
        }

        if(mDecodedFrame->nb_samples <= 0)
        {
            /* Empty frame; discard and try again. */
            av_frame_unref(mDecodedFrame);
            continue;
        }

        /* If provided, update w/ pts */
        int64_t pts = av_frame_get_best_effort_timestamp(mDecodedFrame);
        if(pts != AV_NOPTS_VALUE)
            mCurrentPts = av_q2d(mStream->time_base)*pts;

        /* Grow the conversion buffer if this frame is the largest yet. */
        if(mDecodedFrame->nb_samples > mSamplesMax)
        {
            av_freep(&mSamples);
            av_samples_alloc(
                &mSamples, nullptr, mCodecCtx->channels,
                mDecodedFrame->nb_samples, mDstSampleFmt, 0
            );
            mSamplesMax = mDecodedFrame->nb_samples;
        }
        /* Return the amount of sample frames converted */
        int data_size = swr_convert(mSwresCtx, &mSamples, mDecodedFrame->nb_samples,
            (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
        );

        av_frame_unref(mDecodedFrame);
        return data_size;
    }

    return 0;
}
/* Duplicates the sample frame at 'in' into 'out', 'count' times. 'frame_size'
 * (bytes) must be a multiple of sizeof(T); T is chosen by the caller to move
 * the largest aligned chunks possible.
 */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
{
    const T *src = reinterpret_cast<const T*>(in);
    T *dst = reinterpret_cast<T*>(out);

    if(frame_size == sizeof(T))
    {
        /* One element per frame: a plain fill does the job. */
        std::fill_n(dst, count, *src);
        return;
    }

    /* Multiple elements per frame; copy the frame element-wise, repeatedly.
     * NOTE: frame_size is a multiple of sizeof(T). */
    const int type_mult = frame_size / (int)sizeof(T);
    for(int frame = 0;frame < count;++frame)
    {
        for(int elem = 0;elem < type_mult;++elem)
            *(dst++) = src[elem];
    }
}
/* Fills 'samples' with up to 'length' bytes of converted audio, applying the
 * sync correction from getSync(): a positive correction skips decoded
 * samples, a negative one repeats the first sample (negative mSamplesPos).
 * Pads the tail with silence if the stream ends mid-buffer. Returns the
 * number of bytes written.
 */
int AudioState::readAudio(uint8_t *samples, int length)
{
    int sample_skip = getSync();
    int audio_size = 0;

    /* Read the next chunk of data, refill the buffer, and queue it
     * on the source */
    length /= mFrameSize; /* Work in whole sample frames from here on. */
    while(audio_size < length)
    {
        if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
        {
            int frame_len = decodeFrame();
            if(frame_len <= 0) break;

            mSamplesLen = frame_len;
            /* Apply the sync correction: a positive skip advances the read
             * position (possibly consuming the whole frame); if sample_skip
             * were negative, mSamplesPos goes negative to repeat samples. */
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            sample_skip -= mSamplesPos;

            mCurrentPts += (double)mSamplesPos / (double)mCodecCtx->sample_rate;
            continue;
        }

        int rem = length - audio_size;
        if(mSamplesPos >= 0)
        {
            /* Normal case: copy decoded samples straight through. */
            int len = mSamplesLen - mSamplesPos;
            if(rem > len) rem = len;
            memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
        }
        else
        {
            rem = std::min(rem, -mSamplesPos);

            /* Add samples by copying the first sample, using the widest
             * element type that divides the frame size. */
            if((mFrameSize&7) == 0)
                sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&3) == 0)
                sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&1) == 0)
                sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
            else
                sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
        }

        mSamplesPos += rem;
        mCurrentPts += (double)rem / mCodecCtx->sample_rate;
        samples += rem*mFrameSize;
        audio_size += rem;
    }

    /* Stream ended with a partial buffer; pad it out with silence (0x80 is
     * the zero level for unsigned 8-bit samples). */
    if(audio_size < length && audio_size > 0)
    {
        int rem = length - audio_size;
        std::fill_n(samples, rem*mFrameSize,
            (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
        mCurrentPts += (double)rem / mCodecCtx->sample_rate;
        audio_size += rem;
    }

    return audio_size * mFrameSize;
}
593 int AudioState::handler()
595 std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
596 ALenum fmt;
598 /* Find a suitable format for Alure. */
599 mDstChanLayout = 0;
600 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
602 mDstSampleFmt = AV_SAMPLE_FMT_U8;
603 mFrameSize = 1;
604 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
605 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
606 (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
608 mDstChanLayout = mCodecCtx->channel_layout;
609 mFrameSize *= 8;
610 mFormat = fmt;
612 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
613 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
614 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
615 (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
617 mDstChanLayout = mCodecCtx->channel_layout;
618 mFrameSize *= 6;
619 mFormat = fmt;
621 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
623 mDstChanLayout = mCodecCtx->channel_layout;
624 mFrameSize *= 1;
625 mFormat = AL_FORMAT_MONO8;
627 if(!mDstChanLayout)
629 mDstChanLayout = AV_CH_LAYOUT_STEREO;
630 mFrameSize *= 2;
631 mFormat = AL_FORMAT_STEREO8;
634 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
635 alIsExtensionPresent("AL_EXT_FLOAT32"))
637 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
638 mFrameSize = 4;
639 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
640 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
641 (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
643 mDstChanLayout = mCodecCtx->channel_layout;
644 mFrameSize *= 8;
645 mFormat = fmt;
647 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
648 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
649 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
650 (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
652 mDstChanLayout = mCodecCtx->channel_layout;
653 mFrameSize *= 6;
654 mFormat = fmt;
656 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
658 mDstChanLayout = mCodecCtx->channel_layout;
659 mFrameSize *= 1;
660 mFormat = AL_FORMAT_MONO_FLOAT32;
662 if(!mDstChanLayout)
664 mDstChanLayout = AV_CH_LAYOUT_STEREO;
665 mFrameSize *= 2;
666 mFormat = AL_FORMAT_STEREO_FLOAT32;
669 if(!mDstChanLayout)
671 mDstSampleFmt = AV_SAMPLE_FMT_S16;
672 mFrameSize = 2;
673 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
674 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
675 (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
677 mDstChanLayout = mCodecCtx->channel_layout;
678 mFrameSize *= 8;
679 mFormat = fmt;
681 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
682 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
683 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
684 (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
686 mDstChanLayout = mCodecCtx->channel_layout;
687 mFrameSize *= 6;
688 mFormat = fmt;
690 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
692 mDstChanLayout = mCodecCtx->channel_layout;
693 mFrameSize *= 1;
694 mFormat = AL_FORMAT_MONO16;
696 if(!mDstChanLayout)
698 mDstChanLayout = AV_CH_LAYOUT_STEREO;
699 mFrameSize *= 2;
700 mFormat = AL_FORMAT_STEREO16;
703 ALsizei buffer_len = mCodecCtx->sample_rate * AUDIO_BUFFER_TIME / 1000 *
704 mFrameSize;
705 void *samples = av_malloc(buffer_len);
707 mSamples = NULL;
708 mSamplesMax = 0;
709 mSamplesPos = 0;
710 mSamplesLen = 0;
712 if(!(mDecodedFrame=av_frame_alloc()))
714 std::cerr<< "Failed to allocate audio frame" <<std::endl;
715 goto finish;
718 mSwresCtx = swr_alloc_set_opts(nullptr,
719 mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
720 mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
721 (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
722 mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
723 0, nullptr
725 if(!mSwresCtx || swr_init(mSwresCtx) != 0)
727 std::cerr<< "Failed to initialize audio converter" <<std::endl;
728 goto finish;
731 alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);
732 alGenSources(1, &mSource);
734 if(do_direct_out)
736 if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
737 std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
738 else
740 alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
741 std::cout<< "Direct out enabled" <<std::endl;
745 while(alGetError() == AL_NO_ERROR && !mMovie->mQuit.load())
747 /* First remove any processed buffers. */
748 ALint processed;
749 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
750 if(processed > 0)
752 std::array<ALuint,AUDIO_BUFFER_QUEUE_SIZE> tmp;
753 alSourceUnqueueBuffers(mSource, processed, tmp.data());
756 /* Refill the buffer queue. */
757 ALint queued;
758 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
759 while(queued < AUDIO_BUFFER_QUEUE_SIZE)
761 int audio_size;
763 /* Read the next chunk of data, fill the buffer, and queue it on
764 * the source */
765 audio_size = readAudio(reinterpret_cast<uint8_t*>(samples), buffer_len);
766 if(audio_size <= 0) break;
768 ALuint bufid = mBuffers[mBufferIdx++];
769 mBufferIdx %= AUDIO_BUFFER_QUEUE_SIZE;
771 alBufferData(bufid, mFormat, samples, audio_size, mCodecCtx->sample_rate);
772 alSourceQueueBuffers(mSource, 1, &bufid);
773 queued++;
775 if(queued == 0)
776 break;
778 /* Check that the source is playing. */
779 ALint state;
780 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
781 if(state == AL_STOPPED)
783 /* AL_STOPPED means there was an underrun. Rewind the source to get
784 * it back into an AL_INITIAL state.
786 alSourceRewind(mSource);
787 continue;
790 lock.unlock();
792 /* (re)start the source if needed, and wait for a buffer to finish */
793 if(state != AL_PLAYING && state != AL_PAUSED)
794 alSourcePlay(mSource);
795 SDL_Delay(AUDIO_BUFFER_TIME / 3);
797 lock.lock();
800 finish:
801 alSourceRewind(mSource);
802 alSourcei(mSource, AL_BUFFER, 0);
804 av_frame_free(&mDecodedFrame);
805 swr_free(&mSwresCtx);
807 av_freep(&mSamples);
809 return 0;
813 double VideoState::getClock()
815 double delta = (av_gettime() - mCurrentPtsTime) / 1000000.0;
816 return mCurrentPts + delta;
819 Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
821 SDL_Event evt{};
822 evt.user.type = FF_REFRESH_EVENT;
823 evt.user.data1 = opaque;
824 SDL_PushEvent(&evt);
825 return 0; /* 0 means stop timer */
/* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
void VideoState::schedRefresh(int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, this);
}
/* Called by VideoState::refreshTimer to display the next video frame. */
void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
{
    Picture *vp = &mPictQ[mPictQRead];

    if(!vp->mImage)
        return;

    float aspect_ratio;
    int win_w, win_h;
    int w, h, x, y;

    /* Derive the display aspect ratio from the sample aspect ratio, falling
     * back to the raw frame dimensions when it's unset or invalid. */
    if(mCodecCtx->sample_aspect_ratio.num == 0)
        aspect_ratio = 0.0f;
    else
    {
        aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
                       mCodecCtx->height;
    }
    if(aspect_ratio <= 0.0f)
        aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;

    /* Letterbox: fit the frame in the window, centered, with the width
     * rounded up to a multiple of 4. */
    SDL_GetWindowSize(screen, &win_w, &win_h);
    h = win_h;
    w = ((int)rint(h * aspect_ratio) + 3) & ~3;
    if(w > win_w)
    {
        w = win_w;
        h = ((int)rint(w / aspect_ratio) + 3) & ~3;
    }
    x = (win_w - w) / 2;
    y = (win_h - h) / 2;

    SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
    SDL_Rect dst_rect{ x, y, w, h };
    SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
    SDL_RenderPresent(renderer);
}
/* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
 * was created. It handles the display of the next decoded video frame (if not
 * falling behind), and sets up the timer for the following video frame.
 */
void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
{
    if(!mStream)
    {
        /* No video stream. If the movie is done, signal the decoder thread
         * (which waits on mFinalUpdate); otherwise poll again later. */
        if(mEOS)
        {
            mFinalUpdate = true;
            /* Briefly take the mutex so the waiting thread can't miss the
             * flag between its check and its wait. */
            std::unique_lock<std::mutex>(mPictQMutex).unlock();
            mPictQCond.notify_all();
            return;
        }
        schedRefresh(100);
        return;
    }

    std::unique_lock<std::mutex> lock(mPictQMutex);
retry:
    if(mPictQSize == 0)
    {
        /* Nothing to show yet; either we've hit the end, or check again
         * almost immediately. */
        if(mEOS)
            mFinalUpdate = true;
        else
            schedRefresh(1);
        lock.unlock();
        mPictQCond.notify_all();
        return;
    }

    Picture *vp = &mPictQ[mPictQRead];
    mCurrentPts = vp->mPts;
    mCurrentPtsTime = av_gettime();

    /* Get delay using the frame pts and the pts from last frame. */
    double delay = vp->mPts - mFrameLastPts;
    if(delay <= 0 || delay >= 1.0)
    {
        /* If incorrect delay, use previous one. */
        delay = mFrameLastDelay;
    }
    /* Save for next frame. */
    mFrameLastDelay = delay;
    mFrameLastPts = vp->mPts;

    /* Update delay to sync to clock if not master source. */
    if(mMovie->mAVSyncType != AV_SYNC_VIDEO_MASTER)
    {
        double ref_clock = mMovie->getMasterClock();
        double diff = vp->mPts - ref_clock;

        /* Skip or repeat the frame. Take delay into account. */
        double sync_threshold = std::min(delay, AV_SYNC_THRESHOLD);
        if(fabs(diff) < AV_NOSYNC_THRESHOLD)
        {
            if(diff <= -sync_threshold)
                delay = 0;
            else if(diff >= sync_threshold)
                delay *= 2.0;
        }
    }

    mFrameTimer += delay;
    /* Compute the REAL delay. */
    double actual_delay = mFrameTimer - (av_gettime() / 1000000.0);
    if(!(actual_delay >= 0.010))
    {
        /* We don't have time to handle this picture, just skip to the next one. */
        mPictQRead = (mPictQRead+1)%mPictQ.size();
        mPictQSize--;
        goto retry;
    }
    schedRefresh((int)(actual_delay*1000.0 + 0.5));

    /* Show the picture! */
    display(screen, renderer);

    /* Update queue for next picture. */
    mPictQRead = (mPictQRead+1)%mPictQ.size();
    mPictQSize--;
    lock.unlock();
    mPictQCond.notify_all();
}
/* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
 * main thread where the renderer was created.
 */
void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
{
    Picture *vp = &mPictQ[mPictQWrite];
    bool fmt_updated = false;

    /* allocate or resize the buffer! */
    if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
    {
        fmt_updated = true;
        if(vp->mImage)
            SDL_DestroyTexture(vp->mImage);
        /* The texture is sized to the coded dimensions; the logical size is
         * tracked separately in mWidth/mHeight. */
        vp->mImage = SDL_CreateTexture(
            renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
            mCodecCtx->coded_width, mCodecCtx->coded_height
        );
        if(!vp->mImage)
            std::cerr<< "Failed to create YV12 texture!" <<std::endl;
        vp->mWidth = mCodecCtx->width;
        vp->mHeight = mCodecCtx->height;

        if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
        {
            /* For the first update, set the window size to the video size. */
            mFirstUpdate = false;

            int w = vp->mWidth;
            int h = vp->mHeight;
            if(mCodecCtx->sample_aspect_ratio.den != 0)
            {
                /* Scale the window by the sample aspect ratio so pixels
                 * display square. */
                double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    w = (int)(w*aspect_ratio + 0.5);
                else if(aspect_ratio > 0.0)
                    h = (int)(h/aspect_ratio + 0.5);
            }
            SDL_SetWindowSize(screen, w, h);
        }
    }

    if(vp->mImage)
    {
        AVFrame *frame = mDecodedFrame;
        void *pixels = nullptr;
        int pitch = 0;

        if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
            /* Already the texture's format; upload the planes directly. */
            SDL_UpdateYUVTexture(vp->mImage, nullptr,
                frame->data[0], frame->linesize[0],
                frame->data[1], frame->linesize[1],
                frame->data[2], frame->linesize[2]
            );
        else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
            std::cerr<< "Failed to lock texture" <<std::endl;
        else
        {
            // Convert the image into YUV format that SDL uses
            int coded_w = mCodecCtx->coded_width;
            int coded_h = mCodecCtx->coded_height;
            int w = mCodecCtx->width;
            int h = mCodecCtx->height;
            if(!mSwscaleCtx || fmt_updated)
            {
                /* (Re)create the scaler when the format or size changed. */
                sws_freeContext(mSwscaleCtx);
                mSwscaleCtx = sws_getContext(
                    w, h, mCodecCtx->pix_fmt,
                    w, h, AV_PIX_FMT_YUV420P, 0,
                    nullptr, nullptr, nullptr
                );
            }

            /* point pict at the queue */
            uint8_t *pict_data[3];
            pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
            pict_data[1] = pict_data[0] + coded_w*coded_h;
            pict_data[2] = pict_data[1] + coded_w*coded_h/4;

            int pict_linesize[3];
            pict_linesize[0] = pitch;
            pict_linesize[1] = pitch / 2;
            pict_linesize[2] = pitch / 2;

            sws_scale(mSwscaleCtx, (const uint8_t**)frame->data,
                      frame->linesize, 0, h, pict_data, pict_linesize);
            SDL_UnlockTexture(vp->mImage);
        }
    }

    /* Tell queuePicture() (blocked on mPictQCond) that the slot is ready. */
    std::unique_lock<std::mutex> lock(mPictQMutex);
    vp->mUpdated = true;
    lock.unlock();
    mPictQCond.notify_one();
}
/* Called on the video thread: reserves the next picture-ring slot, asks the
 * main thread (via FF_UPDATE_EVENT) to copy the decoded frame into its
 * texture, and waits for that to complete before publishing the slot.
 * Returns 0 on success, -1 if quitting.
 */
int VideoState::queuePicture(double pts)
{
    /* Wait until we have space for a new pic */
    std::unique_lock<std::mutex> lock(mPictQMutex);
    while(mPictQSize >= mPictQ.size() && !mMovie->mQuit.load())
        mPictQCond.wait(lock);
    lock.unlock();

    if(mMovie->mQuit.load())
        return -1;

    Picture *vp = &mPictQ[mPictQWrite];

    /* We have to create/update the picture in the main thread */
    vp->mUpdated = false;
    SDL_Event evt{};
    evt.user.type = FF_UPDATE_EVENT;
    evt.user.data1 = this;
    SDL_PushEvent(&evt);

    /* Wait until the picture is updated. */
    lock.lock();
    while(!vp->mUpdated && !mMovie->mQuit.load())
        mPictQCond.wait(lock);
    if(mMovie->mQuit.load())
        return -1;
    vp->mPts = pts;

    /* Publish: advance the write index so refreshTimer can display it. */
    mPictQWrite = (mPictQWrite+1)%mPictQ.size();
    mPictQSize++;
    lock.unlock();

    return 0;
}
1090 double VideoState::synchronize(double pts)
1092 double frame_delay;
1094 if(pts == 0.0) /* if we aren't given a pts, set it to the clock */
1095 pts = mClock;
1096 else /* if we have pts, set video clock to it */
1097 mClock = pts;
1099 /* update the video clock */
1100 frame_delay = av_q2d(mCodecCtx->time_base);
1101 /* if we are repeating a frame, adjust clock accordingly */
1102 frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
1103 mClock += frame_delay;
1104 return pts;
/* Video thread entry point: pulls packets off the queue, decodes frames,
 * assigns each a synchronized pts, and hands it to the picture queue. On
 * exit, waits for the display side to acknowledge EOS. Returns 0.
 */
int VideoState::handler()
{
    mDecodedFrame = av_frame_alloc();
    while(!mMovie->mQuit)
    {
        /* Inner loop: feed packets until the decoder has output (EAGAIN) or
         * accepts a packet cleanly. */
        while(!mMovie->mQuit)
        {
            AVPacket packet{};
            if(mQueue.peek(&packet, mMovie->mQuit) <= 0)
                goto finish;

            int ret = avcodec_send_packet(mCodecCtx, &packet);
            if(ret != AVERROR(EAGAIN))
            {
                if(ret < 0)
                    std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
                mQueue.pop();
            }
            av_packet_unref(&packet);
            if(ret == 0 || ret == AVERROR(EAGAIN))
                break;
        }

        /* Decode video frame */
        int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
        if(ret == AVERROR(EAGAIN))
            continue; /* Decoder needs more input. */
        if(ret < 0)
        {
            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
            break;
        }

        double pts = synchronize(
            av_q2d(mStream->time_base) * av_frame_get_best_effort_timestamp(mDecodedFrame)
        );
        if(queuePicture(pts) < 0)
            break;
        av_frame_unref(mDecodedFrame);
    }
finish:
    mEOS = true;
    av_frame_free(&mDecodedFrame);

    std::unique_lock<std::mutex> lock(mPictQMutex);
    if(mMovie->mQuit)
    {
        /* Quitting: discard any queued pictures so nothing blocks. */
        mPictQRead = 0;
        mPictQWrite = 0;
        mPictQSize = 0;
    }
    /* Wait for refreshTimer to acknowledge EOS before tearing down. */
    while(!mFinalUpdate)
        mPictQCond.wait(lock);

    return 0;
}
1165 int MovieState::decode_interrupt_cb(void *ctx)
1167 return reinterpret_cast<MovieState*>(ctx)->mQuit;
/* Opens the movie file with an interruptible I/O context, reads stream
 * info, kicks off the display refresh timer, and starts the parser thread.
 * Returns false on any open/probe failure (mFormatCtx is then cleaned up by
 * the destructor).
 */
bool MovieState::prepare()
{
    mFormatCtx = avformat_alloc_context();
    /* Install the interrupt callback so blocking I/O aborts when mQuit is
     * set. */
    mFormatCtx->interrupt_callback.callback = decode_interrupt_cb;
    mFormatCtx->interrupt_callback.opaque = this;
    if(avio_open2(&mFormatCtx->pb, mFilename.c_str(), AVIO_FLAG_READ,
                  &mFormatCtx->interrupt_callback, nullptr))
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }

    /* Open movie file */
    if(avformat_open_input(&mFormatCtx, mFilename.c_str(), nullptr, nullptr) != 0)
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }

    /* Retrieve stream information */
    if(avformat_find_stream_info(mFormatCtx, nullptr) < 0)
    {
        std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
        return false;
    }

    mVideo.schedRefresh(40);

    mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
    return true;
}
1202 void MovieState::setTitle(SDL_Window *window)
1204 auto pos1 = mFilename.rfind('/');
1205 auto pos2 = mFilename.rfind('\\');
1206 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1207 (pos2 == std::string::npos) ? pos1 :
1208 std::max(pos1, pos2)) + 1;
1209 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1212 double MovieState::getClock()
1214 return (av_gettime()-mExternalClockBase) / 1000000.0;
1217 double MovieState::getMasterClock()
1219 if(mAVSyncType == AV_SYNC_VIDEO_MASTER)
1220 return mVideo.getClock();
1221 if(mAVSyncType == AV_SYNC_AUDIO_MASTER)
1222 return mAudio.getClock();
1223 return getClock();
/* Opens the decoder for the given stream index and starts the matching
 * audio or video handler thread. The new AVCodecContext is handed to the
 * sub-state (which frees it), or freed here on failure/unsupported types.
 * Returns 0 on success, -1 on failure.
 */
int MovieState::streamComponentOpen(int stream_index)
{
    if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
        return -1;

    /* Get a pointer to the codec context for the stream, and open the
     * associated codec.
     */
    AVCodecContext *avctx = avcodec_alloc_context3(nullptr);
    if(!avctx) return -1;

    if(avcodec_parameters_to_context(avctx, mFormatCtx->streams[stream_index]->codecpar))
    {
        avcodec_free_context(&avctx);
        return -1;
    }

    AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
    if(!codec || avcodec_open2(avctx, codec, nullptr) < 0)
    {
        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
                 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        avcodec_free_context(&avctx);
        return -1;
    }

    /* Initialize and start the media type handler */
    switch(avctx->codec_type)
    {
        case AVMEDIA_TYPE_AUDIO:
            mAudioStream = stream_index;
            mAudio.mStream = mFormatCtx->streams[stream_index];
            mAudio.mCodecCtx = avctx;

            /* Averaging filter for audio sync */
            mAudio.mDiff.AvgCoeff = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
            /* Correct audio only if larger error than this */
            mAudio.mDiff.Threshold = 0.050/* 50 ms */;

            mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
            break;

        case AVMEDIA_TYPE_VIDEO:
            mVideoStream = stream_index;
            mVideo.mStream = mFormatCtx->streams[stream_index];
            mVideo.mCodecCtx = avctx;

            /* Prime the frame timer with the current real time, and a sane
             * initial inter-frame delay (40ms ~ 25fps). */
            mVideo.mCurrentPtsTime = av_gettime();
            mVideo.mFrameTimer = (double)mVideo.mCurrentPtsTime / 1000000.0;
            mVideo.mFrameLastDelay = 40e-3;

            mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
            break;

        default:
            avcodec_free_context(&avctx);
            break;
    }

    return 0;
}
1288 int MovieState::parse_handler()
1290 int video_index = -1;
1291 int audio_index = -1;
1293 mVideoStream = -1;
1294 mAudioStream = -1;
1296 /* Dump information about file onto standard error */
1297 av_dump_format(mFormatCtx, 0, mFilename.c_str(), 0);
1299 /* Find the first video and audio streams */
1300 for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
1302 if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1303 video_index = i;
1304 else if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1305 audio_index = i;
1307 /* Start the external clock in 50ms, to give the audio and video
1308 * components time to start without needing to skip ahead.
1310 mExternalClockBase = av_gettime() + 50000;
1311 if(audio_index >= 0)
1312 streamComponentOpen(audio_index);
1313 if(video_index >= 0)
1314 streamComponentOpen(video_index);
1316 if(mVideoStream < 0 && mAudioStream < 0)
1318 std::cerr<< mFilename<<": could not open codecs" <<std::endl;
1319 mQuit = true;
1322 /* Main packet handling loop */
1323 while(!mQuit.load())
1325 if(mAudio.mQueue.mTotalSize + mVideo.mQueue.mTotalSize >= MAX_QUEUE_SIZE)
1327 std::this_thread::sleep_for(std::chrono::milliseconds(10));
1328 continue;
1331 AVPacket packet;
1332 if(av_read_frame(mFormatCtx, &packet) < 0)
1333 break;
1335 /* Copy the packet in the queue it's meant for. */
1336 if(packet.stream_index == mVideoStream)
1337 mVideo.mQueue.put(&packet);
1338 else if(packet.stream_index == mAudioStream)
1339 mAudio.mQueue.put(&packet);
1340 av_packet_unref(&packet);
1342 mVideo.mQueue.finish();
1343 mAudio.mQueue.finish();
1345 /* all done - wait for it */
1346 if(mVideoThread.joinable())
1347 mVideoThread.join();
1348 if(mAudioThread.joinable())
1349 mAudioThread.join();
1351 mVideo.mEOS = true;
1352 std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
1353 while(!mVideo.mFinalUpdate)
1354 mVideo.mPictQCond.wait(lock);
1355 lock.unlock();
1357 SDL_Event evt{};
1358 evt.user.type = FF_MOVIE_DONE_EVENT;
1359 SDL_PushEvent(&evt);
1361 return 0;
1364 } // namespace
1367 int main(int argc, char *argv[])
1369 std::unique_ptr<MovieState> movState;
1371 if(argc < 2)
1373 std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
1374 return 1;
1376 /* Register all formats and codecs */
1377 av_register_all();
1378 /* Initialize networking protocols */
1379 avformat_network_init();
1381 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1383 std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1384 return 1;
1387 /* Make a window to put our video */
1388 SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1389 if(!screen)
1391 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1392 return 1;
1394 /* Make a renderer to handle the texture image surface and rendering. */
1395 SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
1396 if(renderer)
1398 SDL_RendererInfo rinf{};
1399 bool ok = false;
1401 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1402 * software renderer. */
1403 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1405 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1406 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1408 if(!ok)
1410 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1411 SDL_DestroyRenderer(renderer);
1412 renderer = nullptr;
1415 if(!renderer)
1416 renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);
1417 if(!renderer)
1419 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1420 return 1;
1422 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1423 SDL_RenderFillRect(renderer, nullptr);
1424 SDL_RenderPresent(renderer);
1426 /* Open an audio device */
1427 int fileidx = 1;
1428 ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*
1430 ALCdevice *dev = NULL;
1431 if(argc > 3 && strcmp(argv[1], "-device") == 0)
1433 fileidx = 3;
1434 dev = alcOpenDevice(argv[2]);
1435 if(dev) return dev;
1436 std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;
1438 return alcOpenDevice(nullptr);
1439 }();
1440 ALCcontext *context = alcCreateContext(device, nullptr);
1441 if(!context || alcMakeContextCurrent(context) == ALC_FALSE)
1443 std::cerr<< "Failed to set up audio device" <<std::endl;
1444 if(context)
1445 alcDestroyContext(context);
1446 return 1;
1449 const ALCchar *name = nullptr;
1450 if(alcIsExtensionPresent(device, "ALC_ENUMERATE_ALL_EXT"))
1451 name = alcGetString(device, ALC_ALL_DEVICES_SPECIFIER);
1452 if(!name || alcGetError(device) != AL_NO_ERROR)
1453 name = alcGetString(device, ALC_DEVICE_SPECIFIER);
1454 std::cout<< "Opened \""<<name<<"\"" <<std::endl;
1456 if(fileidx < argc && strcmp(argv[fileidx], "-direct") == 0)
1458 ++fileidx;
1459 do_direct_out = true;
1462 while(fileidx < argc && !movState)
1464 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1465 if(!movState->prepare()) movState = nullptr;
1467 if(!movState)
1469 std::cerr<< "Could not start a video" <<std::endl;
1470 return 1;
1472 movState->setTitle(screen);
1474 /* Default to going to the next movie at the end of one. */
1475 enum class EomAction {
1476 Next, Quit
1477 } eom_action = EomAction::Next;
1478 SDL_Event event;
1479 while(SDL_WaitEvent(&event) == 1)
1481 switch(event.type)
1483 case SDL_KEYDOWN:
1484 switch(event.key.keysym.sym)
1486 case SDLK_ESCAPE:
1487 movState->mQuit = true;
1488 eom_action = EomAction::Quit;
1489 break;
1491 case SDLK_n:
1492 movState->mQuit = true;
1493 eom_action = EomAction::Next;
1494 break;
1496 default:
1497 break;
1499 break;
1501 case SDL_WINDOWEVENT:
1502 switch(event.window.event)
1504 case SDL_WINDOWEVENT_RESIZED:
1505 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1506 SDL_RenderFillRect(renderer, nullptr);
1507 break;
1509 default:
1510 break;
1512 break;
1514 case SDL_QUIT:
1515 movState->mQuit = true;
1516 eom_action = EomAction::Quit;
1517 break;
1519 case FF_UPDATE_EVENT:
1520 reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
1521 screen, renderer
1523 break;
1525 case FF_REFRESH_EVENT:
1526 reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
1527 screen, renderer
1529 break;
1531 case FF_MOVIE_DONE_EVENT:
1532 if(eom_action != EomAction::Quit)
1534 movState = nullptr;
1535 while(fileidx < argc && !movState)
1537 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1538 if(!movState->prepare()) movState = nullptr;
1540 if(movState)
1542 movState->setTitle(screen);
1543 break;
1547 /* Nothing more to play. Shut everything down and quit. */
1548 movState = nullptr;
1550 alcMakeContextCurrent(nullptr);
1551 alcDestroyContext(context);
1552 alcCloseDevice(device);
1554 SDL_DestroyRenderer(renderer);
1555 renderer = nullptr;
1556 SDL_DestroyWindow(screen);
1557 screen = nullptr;
1559 SDL_Quit();
1560 exit(0);
1562 default:
1563 break;
1567 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
1568 return 1;