/*
 * openal-soft examples/alffplay.cpp
 * (extracted from gitweb; commit "Add a missing include", blob 2033b976fa3c8046524e8027f0819d97d91c322)
 *
 * An example showing how to play a stream sync'd to video, using ffmpeg.
 *
 * Requires C++11.
 */
#include <algorithm>
#include <array>
#include <atomic>
#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstring>
#include <deque>
#include <functional>
#include <iomanip>
#include <iostream>
#include <limits>
#include <mutex>
#include <thread>

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavutil/time.h"
#include "libavutil/pixfmt.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
}

#include "SDL.h"

#include "AL/alc.h"
#include "AL/al.h"
#include "AL/alext.h"
38 namespace
41 static const std::string AppName("alffplay");
43 static bool has_latency_check = false;
44 static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;
46 #define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
47 #define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
48 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
49 #define AV_SYNC_THRESHOLD 0.01
50 #define AV_NOSYNC_THRESHOLD 10.0
51 #define SAMPLE_CORRECTION_MAX_DIFF 0.05
52 #define AUDIO_DIFF_AVG_NB 20
53 #define VIDEO_PICTURE_QUEUE_SIZE 16
55 enum {
56 FF_UPDATE_EVENT = SDL_USEREVENT,
57 FF_REFRESH_EVENT,
58 FF_MOVIE_DONE_EVENT
61 enum {
62 AV_SYNC_AUDIO_MASTER,
63 AV_SYNC_VIDEO_MASTER,
64 AV_SYNC_EXTERNAL_MASTER,
66 DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER
70 struct PacketQueue {
71 std::deque<AVPacket> mPackets;
72 std::atomic<int> mTotalSize;
73 std::atomic<bool> mFinished;
74 std::mutex mMutex;
75 std::condition_variable mCond;
77 PacketQueue() : mTotalSize(0), mFinished(false)
78 { }
79 ~PacketQueue()
80 { clear(); }
82 int put(const AVPacket *pkt);
83 int peek(AVPacket *pkt, std::atomic<bool> &quit_var);
84 void pop();
86 void clear();
87 void finish();
91 struct MovieState;
93 struct AudioState {
94 MovieState *mMovie;
96 AVStream *mStream;
97 AVCodecContext *mCodecCtx;
99 PacketQueue mQueue;
101 /* Used for clock difference average computation */
102 struct {
103 std::atomic<int> Clocks; /* In microseconds */
104 double Accum;
105 double AvgCoeff;
106 double Threshold;
107 int AvgCount;
108 } mDiff;
110 /* Time (in seconds) of the next sample to be buffered */
111 double mCurrentPts;
113 /* Decompressed sample frame, and swresample context for conversion */
114 AVFrame *mDecodedFrame;
115 struct SwrContext *mSwresCtx;
117 /* Conversion format, for what gets fed to Alure */
118 int mDstChanLayout;
119 enum AVSampleFormat mDstSampleFmt;
121 /* Storage of converted samples */
122 uint8_t *mSamples;
123 int mSamplesLen; /* In samples */
124 int mSamplesPos;
125 int mSamplesMax;
127 /* OpenAL format */
128 ALenum mFormat;
129 ALsizei mFrameSize;
131 std::recursive_mutex mSrcMutex;
132 ALuint mSource;
133 ALuint mBuffers[AUDIO_BUFFER_QUEUE_SIZE];
134 ALsizei mBufferIdx;
136 AudioState(MovieState *movie)
137 : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr)
138 , mDiff{{0}, 0.0, 0.0, 0.0, 0}, mCurrentPts(0.0), mDecodedFrame(nullptr)
139 , mSwresCtx(nullptr), mDstChanLayout(0), mDstSampleFmt(AV_SAMPLE_FMT_NONE)
140 , mSamples(nullptr), mSamplesLen(0), mSamplesPos(0), mSamplesMax(0)
141 , mFormat(AL_NONE), mFrameSize(0), mSource(0), mBufferIdx(0)
143 for(auto &buf : mBuffers)
144 buf = 0;
146 ~AudioState()
148 if(mSource)
149 alDeleteSources(1, &mSource);
150 alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);
152 av_frame_free(&mDecodedFrame);
153 swr_free(&mSwresCtx);
155 av_freep(&mSamples);
157 avcodec_free_context(&mCodecCtx);
160 double getClock();
162 int getSync();
163 int decodeFrame();
164 int readAudio(uint8_t *samples, int length);
166 int handler();
169 struct VideoState {
170 MovieState *mMovie;
172 AVStream *mStream;
173 AVCodecContext *mCodecCtx;
175 PacketQueue mQueue;
177 double mClock;
178 double mFrameTimer;
179 double mFrameLastPts;
180 double mFrameLastDelay;
181 double mCurrentPts;
182 /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
183 int64_t mCurrentPtsTime;
185 /* Decompressed video frame, and swscale context for conversion */
186 AVFrame *mDecodedFrame;
187 struct SwsContext *mSwscaleCtx;
189 struct Picture {
190 SDL_Texture *mImage;
191 int mWidth, mHeight; /* Logical image size (actual size may be larger) */
192 std::atomic<bool> mUpdated;
193 double mPts;
195 Picture()
196 : mImage(nullptr), mWidth(0), mHeight(0), mUpdated(false), mPts(0.0)
198 ~Picture()
200 if(mImage)
201 SDL_DestroyTexture(mImage);
202 mImage = nullptr;
205 std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
206 size_t mPictQSize, mPictQRead, mPictQWrite;
207 std::mutex mPictQMutex;
208 std::condition_variable mPictQCond;
209 bool mFirstUpdate;
210 std::atomic<bool> mEOS;
211 std::atomic<bool> mFinalUpdate;
213 VideoState(MovieState *movie)
214 : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr), mClock(0.0)
215 , mFrameTimer(0.0), mFrameLastPts(0.0), mFrameLastDelay(0.0)
216 , mCurrentPts(0.0), mCurrentPtsTime(0), mDecodedFrame(nullptr)
217 , mSwscaleCtx(nullptr), mPictQSize(0), mPictQRead(0), mPictQWrite(0)
218 , mFirstUpdate(true), mEOS(false), mFinalUpdate(false)
220 ~VideoState()
222 sws_freeContext(mSwscaleCtx);
223 mSwscaleCtx = nullptr;
224 av_frame_free(&mDecodedFrame);
225 avcodec_free_context(&mCodecCtx);
228 double getClock();
230 static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
231 void schedRefresh(int delay);
232 void display(SDL_Window *screen, SDL_Renderer *renderer);
233 void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
234 void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
235 int queuePicture(double pts);
236 double synchronize(double pts);
237 int handler();
240 struct MovieState {
241 AVFormatContext *mFormatCtx;
242 int mVideoStream, mAudioStream;
244 int mAVSyncType;
246 int64_t mExternalClockBase;
248 std::atomic<bool> mQuit;
250 AudioState mAudio;
251 VideoState mVideo;
253 std::thread mParseThread;
254 std::thread mAudioThread;
255 std::thread mVideoThread;
257 std::string mFilename;
259 MovieState(std::string fname)
260 : mFormatCtx(nullptr), mVideoStream(0), mAudioStream(0)
261 , mAVSyncType(DEFAULT_AV_SYNC_TYPE), mExternalClockBase(0), mQuit(false)
262 , mAudio(this), mVideo(this), mFilename(std::move(fname))
264 ~MovieState()
266 mQuit = true;
267 if(mParseThread.joinable())
268 mParseThread.join();
269 avformat_close_input(&mFormatCtx);
272 static int decode_interrupt_cb(void *ctx);
273 bool prepare();
274 void setTitle(SDL_Window *window);
276 double getClock();
278 double getMasterClock();
280 int streamComponentOpen(int stream_index);
281 int parse_handler();
285 int PacketQueue::put(const AVPacket *pkt)
287 std::unique_lock<std::mutex> lock(mMutex);
288 mPackets.push_back(AVPacket{});
289 if(av_packet_ref(&mPackets.back(), pkt) != 0)
291 mPackets.pop_back();
292 return -1;
294 mTotalSize += mPackets.back().size;
295 lock.unlock();
297 mCond.notify_one();
298 return 0;
301 int PacketQueue::peek(AVPacket *pkt, std::atomic<bool> &quit_var)
303 std::unique_lock<std::mutex> lock(mMutex);
304 while(!quit_var.load())
306 if(!mPackets.empty())
308 if(av_packet_ref(pkt, &mPackets.front()) != 0)
309 return -1;
310 return 1;
313 if(mFinished.load())
314 return 0;
315 mCond.wait(lock);
317 return -1;
320 void PacketQueue::pop()
322 std::unique_lock<std::mutex> lock(mMutex);
323 AVPacket *pkt = &mPackets.front();
324 mTotalSize -= pkt->size;
325 av_packet_unref(pkt);
326 mPackets.pop_front();
329 void PacketQueue::clear()
331 std::unique_lock<std::mutex> lock(mMutex);
332 std::for_each(mPackets.begin(), mPackets.end(),
333 [](AVPacket &pkt) { av_packet_unref(&pkt); }
335 mPackets.clear();
336 mTotalSize = 0;
338 void PacketQueue::finish()
340 std::unique_lock<std::mutex> lock(mMutex);
341 mFinished = true;
342 lock.unlock();
343 mCond.notify_all();
347 double AudioState::getClock()
349 double pts;
351 std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
352 /* The audio clock is the timestamp of the sample currently being heard.
353 * It's based on 4 components:
354 * 1 - The timestamp of the next sample to buffer (state->current_pts)
355 * 2 - The length of the source's buffer queue
356 * 3 - The offset OpenAL is currently at in the source (the first value
357 * from AL_SEC_OFFSET_LATENCY_SOFT)
358 * 4 - The latency between OpenAL and the DAC (the second value from
359 * AL_SEC_OFFSET_LATENCY_SOFT)
361 * Subtracting the length of the source queue from the next sample's
362 * timestamp gives the timestamp of the sample at start of the source
363 * queue. Adding the source offset to that results in the timestamp for
364 * OpenAL's current position, and subtracting the source latency from that
365 * gives the timestamp of the sample currently at the DAC.
367 pts = mCurrentPts;
368 if(mSource)
370 ALdouble offset[2];
371 ALint queue_size;
372 ALint status;
374 /* NOTE: The source state must be checked last, in case an underrun
375 * occurs and the source stops between retrieving the offset+latency
376 * and getting the state. */
377 if(has_latency_check)
379 alGetSourcedvSOFT(mSource, AL_SEC_OFFSET_LATENCY_SOFT, offset);
380 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
382 else
384 ALint ioffset;
385 alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
386 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
387 offset[0] = (double)ioffset / (double)mCodecCtx->sample_rate;
388 offset[1] = 0.0f;
390 alGetSourcei(mSource, AL_SOURCE_STATE, &status);
392 /* If the source is AL_STOPPED, then there was an underrun and all
393 * buffers are processed, so ignore the source queue. The audio thread
394 * will put the source into an AL_INITIAL state and clear the queue
395 * when it starts recovery. */
396 if(status != AL_STOPPED)
397 pts -= queue_size*((double)AUDIO_BUFFER_TIME/1000.0) - offset[0];
398 if(status == AL_PLAYING)
399 pts -= offset[1];
401 lock.unlock();
403 return std::max(pts, 0.0);
406 int AudioState::getSync()
408 double diff, avg_diff, ref_clock;
410 if(mMovie->mAVSyncType == AV_SYNC_AUDIO_MASTER)
411 return 0;
413 ref_clock = mMovie->getMasterClock();
414 diff = ref_clock - getClock();
416 if(!(fabs(diff) < AV_NOSYNC_THRESHOLD))
418 /* Difference is TOO big; reset diff stuff */
419 mDiff.Accum = 0.0;
420 return 0;
423 /* Accumulate the diffs */
424 mDiff.Accum = mDiff.Accum*mDiff.AvgCoeff + diff;
425 avg_diff = mDiff.Accum*(1.0 - mDiff.AvgCoeff);
426 if(fabs(avg_diff) < mDiff.Threshold)
427 return 0;
429 /* Constrain the per-update difference to avoid exceedingly large skips */
430 if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
431 diff = SAMPLE_CORRECTION_MAX_DIFF;
432 else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
433 diff = -SAMPLE_CORRECTION_MAX_DIFF;
434 return (int)(diff*mCodecCtx->sample_rate);
437 int AudioState::decodeFrame()
439 while(!mMovie->mQuit.load())
441 while(!mMovie->mQuit.load())
443 /* Get the next packet */
444 AVPacket pkt{};
445 if(mQueue.peek(&pkt, mMovie->mQuit) <= 0)
446 return -1;
448 int ret = avcodec_send_packet(mCodecCtx, &pkt);
449 if(ret != AVERROR(EAGAIN))
451 if(ret < 0)
452 std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
453 mQueue.pop();
455 av_packet_unref(&pkt);
456 if(ret == 0 || ret == AVERROR(EAGAIN))
457 break;
460 int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
461 if(ret == AVERROR(EAGAIN))
462 continue;
463 if(ret == AVERROR_EOF || ret < 0)
465 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
466 return 0;
469 if(mDecodedFrame->nb_samples <= 0)
471 av_frame_unref(mDecodedFrame);
472 continue;
475 /* If provided, update w/ pts */
476 int64_t pts = av_frame_get_best_effort_timestamp(mDecodedFrame);
477 if(pts != AV_NOPTS_VALUE)
478 mCurrentPts = av_q2d(mStream->time_base)*pts;
480 if(mDecodedFrame->nb_samples > mSamplesMax)
482 av_freep(&mSamples);
483 av_samples_alloc(
484 &mSamples, nullptr, mCodecCtx->channels,
485 mDecodedFrame->nb_samples, mDstSampleFmt, 0
487 mSamplesMax = mDecodedFrame->nb_samples;
489 /* Return the amount of sample frames converted */
490 int data_size = swr_convert(mSwresCtx, &mSamples, mDecodedFrame->nb_samples,
491 (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
494 av_frame_unref(mDecodedFrame);
495 return data_size;
498 return 0;
501 /* Duplicates the sample at in to out, count times. The frame size is a
502 * multiple of the template type size.
504 template<typename T>
505 static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
507 const T *sample = reinterpret_cast<const T*>(in);
508 T *dst = reinterpret_cast<T*>(out);
509 if(frame_size == sizeof(T))
510 std::fill_n(dst, count, *sample);
511 else
513 /* NOTE: frame_size is a multiple of sizeof(T). */
514 int type_mult = frame_size / sizeof(T);
515 int i = 0;
516 std::generate_n(dst, count*type_mult,
517 [sample,type_mult,&i]() -> T
519 T ret = sample[i];
520 i = (i+1)%type_mult;
521 return ret;
528 int AudioState::readAudio(uint8_t *samples, int length)
530 int sample_skip = getSync();
531 int audio_size = 0;
533 /* Read the next chunk of data, refill the buffer, and queue it
534 * on the source */
535 length /= mFrameSize;
536 while(audio_size < length)
538 if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
540 int frame_len = decodeFrame();
541 if(frame_len <= 0) break;
543 mSamplesLen = frame_len;
544 mSamplesPos = std::min(mSamplesLen, sample_skip);
545 sample_skip -= mSamplesPos;
547 mCurrentPts += (double)mSamplesPos / (double)mCodecCtx->sample_rate;
548 continue;
551 int rem = length - audio_size;
552 if(mSamplesPos >= 0)
554 int len = mSamplesLen - mSamplesPos;
555 if(rem > len) rem = len;
556 memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
558 else
560 rem = std::min(rem, -mSamplesPos);
562 /* Add samples by copying the first sample */
563 if((mFrameSize&7) == 0)
564 sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
565 else if((mFrameSize&3) == 0)
566 sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
567 else if((mFrameSize&1) == 0)
568 sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
569 else
570 sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
573 mSamplesPos += rem;
574 mCurrentPts += (double)rem / mCodecCtx->sample_rate;
575 samples += rem*mFrameSize;
576 audio_size += rem;
579 if(audio_size < length && audio_size > 0)
581 int rem = length - audio_size;
582 std::fill_n(samples, rem*mFrameSize,
583 (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
584 mCurrentPts += (double)rem / mCodecCtx->sample_rate;
585 audio_size += rem;
588 return audio_size * mFrameSize;
592 int AudioState::handler()
594 std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
595 ALenum fmt;
597 /* Find a suitable format for Alure. */
598 mDstChanLayout = 0;
599 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
601 mDstSampleFmt = AV_SAMPLE_FMT_U8;
602 mFrameSize = 1;
603 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
604 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
605 (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
607 mDstChanLayout = mCodecCtx->channel_layout;
608 mFrameSize *= 8;
609 mFormat = fmt;
611 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
612 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
613 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
614 (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
616 mDstChanLayout = mCodecCtx->channel_layout;
617 mFrameSize *= 6;
618 mFormat = fmt;
620 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
622 mDstChanLayout = mCodecCtx->channel_layout;
623 mFrameSize *= 1;
624 mFormat = AL_FORMAT_MONO8;
626 if(!mDstChanLayout)
628 mDstChanLayout = AV_CH_LAYOUT_STEREO;
629 mFrameSize *= 2;
630 mFormat = AL_FORMAT_STEREO8;
633 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
634 alIsExtensionPresent("AL_EXT_FLOAT32"))
636 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
637 mFrameSize = 4;
638 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
639 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
640 (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
642 mDstChanLayout = mCodecCtx->channel_layout;
643 mFrameSize *= 8;
644 mFormat = fmt;
646 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
647 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
648 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
649 (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
651 mDstChanLayout = mCodecCtx->channel_layout;
652 mFrameSize *= 6;
653 mFormat = fmt;
655 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
657 mDstChanLayout = mCodecCtx->channel_layout;
658 mFrameSize *= 1;
659 mFormat = AL_FORMAT_MONO_FLOAT32;
661 if(!mDstChanLayout)
663 mDstChanLayout = AV_CH_LAYOUT_STEREO;
664 mFrameSize *= 2;
665 mFormat = AL_FORMAT_STEREO_FLOAT32;
668 if(!mDstChanLayout)
670 mDstSampleFmt = AV_SAMPLE_FMT_S16;
671 mFrameSize = 2;
672 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
673 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
674 (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
676 mDstChanLayout = mCodecCtx->channel_layout;
677 mFrameSize *= 8;
678 mFormat = fmt;
680 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
681 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
682 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
683 (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
685 mDstChanLayout = mCodecCtx->channel_layout;
686 mFrameSize *= 6;
687 mFormat = fmt;
689 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
691 mDstChanLayout = mCodecCtx->channel_layout;
692 mFrameSize *= 1;
693 mFormat = AL_FORMAT_MONO16;
695 if(!mDstChanLayout)
697 mDstChanLayout = AV_CH_LAYOUT_STEREO;
698 mFrameSize *= 2;
699 mFormat = AL_FORMAT_STEREO16;
702 ALsizei buffer_len = mCodecCtx->sample_rate * AUDIO_BUFFER_TIME / 1000 *
703 mFrameSize;
704 void *samples = av_malloc(buffer_len);
706 mSamples = NULL;
707 mSamplesMax = 0;
708 mSamplesPos = 0;
709 mSamplesLen = 0;
711 if(!(mDecodedFrame=av_frame_alloc()))
713 std::cerr<< "Failed to allocate audio frame" <<std::endl;
714 goto finish;
717 mSwresCtx = swr_alloc_set_opts(nullptr,
718 mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
719 mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
720 (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
721 mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
722 0, nullptr
724 if(!mSwresCtx || swr_init(mSwresCtx) != 0)
726 std::cerr<< "Failed to initialize audio converter" <<std::endl;
727 goto finish;
730 alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);
731 alGenSources(1, &mSource);
733 while(alGetError() == AL_NO_ERROR && !mMovie->mQuit.load())
735 /* First remove any processed buffers. */
736 ALint processed;
737 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
738 if(processed > 0)
740 std::array<ALuint,AUDIO_BUFFER_QUEUE_SIZE> tmp;
741 alSourceUnqueueBuffers(mSource, processed, tmp.data());
744 /* Refill the buffer queue. */
745 ALint queued;
746 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
747 while(queued < AUDIO_BUFFER_QUEUE_SIZE)
749 int audio_size;
751 /* Read the next chunk of data, fill the buffer, and queue it on
752 * the source */
753 audio_size = readAudio(reinterpret_cast<uint8_t*>(samples), buffer_len);
754 if(audio_size <= 0) break;
756 ALuint bufid = mBuffers[mBufferIdx++];
757 mBufferIdx %= AUDIO_BUFFER_QUEUE_SIZE;
759 alBufferData(bufid, mFormat, samples, audio_size, mCodecCtx->sample_rate);
760 alSourceQueueBuffers(mSource, 1, &bufid);
761 queued++;
763 if(queued == 0)
764 break;
766 /* Check that the source is playing. */
767 ALint state;
768 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
769 if(state == AL_STOPPED)
771 /* AL_STOPPED means there was an underrun. Rewind the source to get
772 * it back into an AL_INITIAL state.
774 alSourceRewind(mSource);
775 continue;
778 lock.unlock();
780 /* (re)start the source if needed, and wait for a buffer to finish */
781 if(state != AL_PLAYING && state != AL_PAUSED)
782 alSourcePlay(mSource);
783 SDL_Delay(AUDIO_BUFFER_TIME / 3);
785 lock.lock();
788 finish:
789 alSourceRewind(mSource);
790 alSourcei(mSource, AL_BUFFER, 0);
792 av_frame_free(&mDecodedFrame);
793 swr_free(&mSwresCtx);
795 av_freep(&mSamples);
797 return 0;
801 double VideoState::getClock()
803 double delta = (av_gettime() - mCurrentPtsTime) / 1000000.0;
804 return mCurrentPts + delta;
807 Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
809 SDL_Event evt{};
810 evt.user.type = FF_REFRESH_EVENT;
811 evt.user.data1 = opaque;
812 SDL_PushEvent(&evt);
813 return 0; /* 0 means stop timer */
816 /* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
817 void VideoState::schedRefresh(int delay)
819 SDL_AddTimer(delay, sdl_refresh_timer_cb, this);
822 /* Called by VideoState::refreshTimer to display the next video frame. */
823 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
825 Picture *vp = &mPictQ[mPictQRead];
827 if(!vp->mImage)
828 return;
830 float aspect_ratio;
831 int win_w, win_h;
832 int w, h, x, y;
834 if(mCodecCtx->sample_aspect_ratio.num == 0)
835 aspect_ratio = 0.0f;
836 else
838 aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
839 mCodecCtx->height;
841 if(aspect_ratio <= 0.0f)
842 aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
844 SDL_GetWindowSize(screen, &win_w, &win_h);
845 h = win_h;
846 w = ((int)rint(h * aspect_ratio) + 3) & ~3;
847 if(w > win_w)
849 w = win_w;
850 h = ((int)rint(w / aspect_ratio) + 3) & ~3;
852 x = (win_w - w) / 2;
853 y = (win_h - h) / 2;
855 SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
856 SDL_Rect dst_rect{ x, y, w, h };
857 SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
858 SDL_RenderPresent(renderer);
861 /* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
862 * was created. It handles the display of the next decoded video frame (if not
863 * falling behind), and sets up the timer for the following video frame.
865 void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
867 if(!mStream)
869 if(mEOS)
871 mFinalUpdate = true;
872 std::unique_lock<std::mutex>(mPictQMutex).unlock();
873 mPictQCond.notify_all();
874 return;
876 schedRefresh(100);
877 return;
880 std::unique_lock<std::mutex> lock(mPictQMutex);
881 retry:
882 if(mPictQSize == 0)
884 if(mEOS)
885 mFinalUpdate = true;
886 else
887 schedRefresh(1);
888 lock.unlock();
889 mPictQCond.notify_all();
890 return;
893 Picture *vp = &mPictQ[mPictQRead];
894 mCurrentPts = vp->mPts;
895 mCurrentPtsTime = av_gettime();
897 /* Get delay using the frame pts and the pts from last frame. */
898 double delay = vp->mPts - mFrameLastPts;
899 if(delay <= 0 || delay >= 1.0)
901 /* If incorrect delay, use previous one. */
902 delay = mFrameLastDelay;
904 /* Save for next frame. */
905 mFrameLastDelay = delay;
906 mFrameLastPts = vp->mPts;
908 /* Update delay to sync to clock if not master source. */
909 if(mMovie->mAVSyncType != AV_SYNC_VIDEO_MASTER)
911 double ref_clock = mMovie->getMasterClock();
912 double diff = vp->mPts - ref_clock;
914 /* Skip or repeat the frame. Take delay into account. */
915 double sync_threshold = std::min(delay, AV_SYNC_THRESHOLD);
916 if(fabs(diff) < AV_NOSYNC_THRESHOLD)
918 if(diff <= -sync_threshold)
919 delay = 0;
920 else if(diff >= sync_threshold)
921 delay *= 2.0;
925 mFrameTimer += delay;
926 /* Compute the REAL delay. */
927 double actual_delay = mFrameTimer - (av_gettime() / 1000000.0);
928 if(!(actual_delay >= 0.010))
930 /* We don't have time to handle this picture, just skip to the next one. */
931 mPictQRead = (mPictQRead+1)%mPictQ.size();
932 mPictQSize--;
933 goto retry;
935 schedRefresh((int)(actual_delay*1000.0 + 0.5));
937 /* Show the picture! */
938 display(screen, renderer);
940 /* Update queue for next picture. */
941 mPictQRead = (mPictQRead+1)%mPictQ.size();
942 mPictQSize--;
943 lock.unlock();
944 mPictQCond.notify_all();
947 /* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
948 * main thread where the renderer was created.
950 void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
952 Picture *vp = &mPictQ[mPictQWrite];
953 bool fmt_updated = false;
955 /* allocate or resize the buffer! */
956 if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
958 fmt_updated = true;
959 if(vp->mImage)
960 SDL_DestroyTexture(vp->mImage);
961 vp->mImage = SDL_CreateTexture(
962 renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
963 mCodecCtx->coded_width, mCodecCtx->coded_height
965 if(!vp->mImage)
966 std::cerr<< "Failed to create YV12 texture!" <<std::endl;
967 vp->mWidth = mCodecCtx->width;
968 vp->mHeight = mCodecCtx->height;
970 if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
972 /* For the first update, set the window size to the video size. */
973 mFirstUpdate = false;
975 int w = vp->mWidth;
976 int h = vp->mHeight;
977 if(mCodecCtx->sample_aspect_ratio.den != 0)
979 double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
980 if(aspect_ratio >= 1.0)
981 w = (int)(w*aspect_ratio + 0.5);
982 else if(aspect_ratio > 0.0)
983 h = (int)(h/aspect_ratio + 0.5);
985 SDL_SetWindowSize(screen, w, h);
989 if(vp->mImage)
991 AVFrame *frame = mDecodedFrame;
992 void *pixels = nullptr;
993 int pitch = 0;
995 if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
996 SDL_UpdateYUVTexture(vp->mImage, nullptr,
997 frame->data[0], frame->linesize[0],
998 frame->data[1], frame->linesize[1],
999 frame->data[2], frame->linesize[2]
1001 else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
1002 std::cerr<< "Failed to lock texture" <<std::endl;
1003 else
1005 // Convert the image into YUV format that SDL uses
1006 int coded_w = mCodecCtx->coded_width;
1007 int coded_h = mCodecCtx->coded_height;
1008 int w = mCodecCtx->width;
1009 int h = mCodecCtx->height;
1010 if(!mSwscaleCtx || fmt_updated)
1012 sws_freeContext(mSwscaleCtx);
1013 mSwscaleCtx = sws_getContext(
1014 w, h, mCodecCtx->pix_fmt,
1015 w, h, AV_PIX_FMT_YUV420P, 0,
1016 nullptr, nullptr, nullptr
1020 /* point pict at the queue */
1021 uint8_t *pict_data[3];
1022 pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
1023 pict_data[1] = pict_data[0] + coded_w*coded_h;
1024 pict_data[2] = pict_data[1] + coded_w*coded_h/4;
1026 int pict_linesize[3];
1027 pict_linesize[0] = pitch;
1028 pict_linesize[1] = pitch / 2;
1029 pict_linesize[2] = pitch / 2;
1031 sws_scale(mSwscaleCtx, (const uint8_t**)frame->data,
1032 frame->linesize, 0, h, pict_data, pict_linesize);
1033 SDL_UnlockTexture(vp->mImage);
1037 std::unique_lock<std::mutex> lock(mPictQMutex);
1038 vp->mUpdated = true;
1039 lock.unlock();
1040 mPictQCond.notify_one();
1043 int VideoState::queuePicture(double pts)
1045 /* Wait until we have space for a new pic */
1046 std::unique_lock<std::mutex> lock(mPictQMutex);
1047 while(mPictQSize >= mPictQ.size() && !mMovie->mQuit.load())
1048 mPictQCond.wait(lock);
1049 lock.unlock();
1051 if(mMovie->mQuit.load())
1052 return -1;
1054 Picture *vp = &mPictQ[mPictQWrite];
1056 /* We have to create/update the picture in the main thread */
1057 vp->mUpdated = false;
1058 SDL_Event evt{};
1059 evt.user.type = FF_UPDATE_EVENT;
1060 evt.user.data1 = this;
1061 SDL_PushEvent(&evt);
1063 /* Wait until the picture is updated. */
1064 lock.lock();
1065 while(!vp->mUpdated && !mMovie->mQuit.load())
1066 mPictQCond.wait(lock);
1067 if(mMovie->mQuit.load())
1068 return -1;
1069 vp->mPts = pts;
1071 mPictQWrite = (mPictQWrite+1)%mPictQ.size();
1072 mPictQSize++;
1073 lock.unlock();
1075 return 0;
1078 double VideoState::synchronize(double pts)
1080 double frame_delay;
1082 if(pts == 0.0) /* if we aren't given a pts, set it to the clock */
1083 pts = mClock;
1084 else /* if we have pts, set video clock to it */
1085 mClock = pts;
1087 /* update the video clock */
1088 frame_delay = av_q2d(mCodecCtx->time_base);
1089 /* if we are repeating a frame, adjust clock accordingly */
1090 frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
1091 mClock += frame_delay;
1092 return pts;
1095 int VideoState::handler()
1097 mDecodedFrame = av_frame_alloc();
1098 while(!mMovie->mQuit)
1100 while(!mMovie->mQuit)
1102 AVPacket packet{};
1103 if(mQueue.peek(&packet, mMovie->mQuit) <= 0)
1104 goto finish;
1106 int ret = avcodec_send_packet(mCodecCtx, &packet);
1107 if(ret != AVERROR(EAGAIN))
1109 if(ret < 0)
1110 std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
1111 mQueue.pop();
1113 av_packet_unref(&packet);
1114 if(ret == 0 || ret == AVERROR(EAGAIN))
1115 break;
1118 /* Decode video frame */
1119 int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
1120 if(ret == AVERROR(EAGAIN))
1121 continue;
1122 if(ret < 0)
1124 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
1125 break;
1128 double pts = synchronize(
1129 av_q2d(mStream->time_base) * av_frame_get_best_effort_timestamp(mDecodedFrame)
1131 if(queuePicture(pts) < 0)
1132 break;
1133 av_frame_unref(mDecodedFrame);
1135 finish:
1136 mEOS = true;
1137 av_frame_free(&mDecodedFrame);
1139 std::unique_lock<std::mutex> lock(mPictQMutex);
1140 if(mMovie->mQuit)
1142 mPictQRead = 0;
1143 mPictQWrite = 0;
1144 mPictQSize = 0;
1146 while(!mFinalUpdate)
1147 mPictQCond.wait(lock);
1149 return 0;
1153 int MovieState::decode_interrupt_cb(void *ctx)
1155 return reinterpret_cast<MovieState*>(ctx)->mQuit;
1158 bool MovieState::prepare()
1160 mFormatCtx = avformat_alloc_context();
1161 mFormatCtx->interrupt_callback.callback = decode_interrupt_cb;
1162 mFormatCtx->interrupt_callback.opaque = this;
1163 if(avio_open2(&mFormatCtx->pb, mFilename.c_str(), AVIO_FLAG_READ,
1164 &mFormatCtx->interrupt_callback, nullptr))
1166 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1167 return false;
1170 /* Open movie file */
1171 if(avformat_open_input(&mFormatCtx, mFilename.c_str(), nullptr, nullptr) != 0)
1173 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1174 return false;
1177 /* Retrieve stream information */
1178 if(avformat_find_stream_info(mFormatCtx, nullptr) < 0)
1180 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1181 return false;
1184 mVideo.schedRefresh(40);
1186 mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
1187 return true;
1190 void MovieState::setTitle(SDL_Window *window)
1192 auto pos1 = mFilename.rfind('/');
1193 auto pos2 = mFilename.rfind('\\');
1194 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1195 (pos2 == std::string::npos) ? pos1 :
1196 std::max(pos1, pos2)) + 1;
1197 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1200 double MovieState::getClock()
1202 return (av_gettime()-mExternalClockBase) / 1000000.0;
1205 double MovieState::getMasterClock()
1207 if(mAVSyncType == AV_SYNC_VIDEO_MASTER)
1208 return mVideo.getClock();
1209 if(mAVSyncType == AV_SYNC_AUDIO_MASTER)
1210 return mAudio.getClock();
1211 return getClock();
1214 int MovieState::streamComponentOpen(int stream_index)
1216 if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
1217 return -1;
1219 /* Get a pointer to the codec context for the stream, and open the
1220 * associated codec.
1222 AVCodecContext *avctx = avcodec_alloc_context3(nullptr);
1223 if(!avctx) return -1;
1225 if(avcodec_parameters_to_context(avctx, mFormatCtx->streams[stream_index]->codecpar))
1227 avcodec_free_context(&avctx);
1228 return -1;
1231 AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
1232 if(!codec || avcodec_open2(avctx, codec, nullptr) < 0)
1234 std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1235 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1236 avcodec_free_context(&avctx);
1237 return -1;
1240 /* Initialize and start the media type handler */
1241 switch(avctx->codec_type)
1243 case AVMEDIA_TYPE_AUDIO:
1244 mAudioStream = stream_index;
1245 mAudio.mStream = mFormatCtx->streams[stream_index];
1246 mAudio.mCodecCtx = avctx;
1248 /* Averaging filter for audio sync */
1249 mAudio.mDiff.AvgCoeff = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1250 /* Correct audio only if larger error than this */
1251 mAudio.mDiff.Threshold = 0.050/* 50 ms */;
1253 mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
1254 break;
1256 case AVMEDIA_TYPE_VIDEO:
1257 mVideoStream = stream_index;
1258 mVideo.mStream = mFormatCtx->streams[stream_index];
1259 mVideo.mCodecCtx = avctx;
1261 mVideo.mCurrentPtsTime = av_gettime();
1262 mVideo.mFrameTimer = (double)mVideo.mCurrentPtsTime / 1000000.0;
1263 mVideo.mFrameLastDelay = 40e-3;
1265 mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
1266 break;
1268 default:
1269 avcodec_free_context(&avctx);
1270 break;
1273 return 0;
1276 int MovieState::parse_handler()
1278 int video_index = -1;
1279 int audio_index = -1;
1281 mVideoStream = -1;
1282 mAudioStream = -1;
1284 /* Dump information about file onto standard error */
1285 av_dump_format(mFormatCtx, 0, mFilename.c_str(), 0);
1287 /* Find the first video and audio streams */
1288 for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
1290 if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1291 video_index = i;
1292 else if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1293 audio_index = i;
1295 /* Start the external clock in 50ms, to give the audio and video
1296 * components time to start without needing to skip ahead.
1298 mExternalClockBase = av_gettime() + 50000;
1299 if(audio_index >= 0)
1300 streamComponentOpen(audio_index);
1301 if(video_index >= 0)
1302 streamComponentOpen(video_index);
1304 if(mVideoStream < 0 && mAudioStream < 0)
1306 std::cerr<< mFilename<<": could not open codecs" <<std::endl;
1307 mQuit = true;
1310 /* Main packet handling loop */
1311 while(!mQuit.load())
1313 if(mAudio.mQueue.mTotalSize + mVideo.mQueue.mTotalSize >= MAX_QUEUE_SIZE)
1315 std::this_thread::sleep_for(std::chrono::milliseconds(10));
1316 continue;
1319 AVPacket packet;
1320 if(av_read_frame(mFormatCtx, &packet) < 0)
1321 break;
1323 /* Copy the packet in the queue it's meant for. */
1324 if(packet.stream_index == mVideoStream)
1325 mVideo.mQueue.put(&packet);
1326 else if(packet.stream_index == mAudioStream)
1327 mAudio.mQueue.put(&packet);
1328 av_packet_unref(&packet);
1330 mVideo.mQueue.finish();
1331 mAudio.mQueue.finish();
1333 /* all done - wait for it */
1334 if(mVideoThread.joinable())
1335 mVideoThread.join();
1336 if(mAudioThread.joinable())
1337 mAudioThread.join();
1339 mVideo.mEOS = true;
1340 std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
1341 while(!mVideo.mFinalUpdate)
1342 mVideo.mPictQCond.wait(lock);
1343 lock.unlock();
1345 SDL_Event evt{};
1346 evt.user.type = FF_MOVIE_DONE_EVENT;
1347 SDL_PushEvent(&evt);
1349 return 0;
1352 } // namespace
1355 int main(int argc, char *argv[])
1357 std::unique_ptr<MovieState> movState;
1359 if(argc < 2)
1361 std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] <files...>" <<std::endl;
1362 return 1;
1364 /* Register all formats and codecs */
1365 av_register_all();
1366 /* Initialize networking protocols */
1367 avformat_network_init();
1369 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1371 std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1372 return 1;
1375 /* Make a window to put our video */
1376 SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1377 if(!screen)
1379 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1380 return 1;
1382 /* Make a renderer to handle the texture image surface and rendering. */
1383 SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
1384 if(renderer)
1386 SDL_RendererInfo rinf{};
1387 bool ok = false;
1389 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1390 * software renderer. */
1391 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1393 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1394 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1396 if(!ok)
1398 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1399 SDL_DestroyRenderer(renderer);
1400 renderer = nullptr;
1403 if(!renderer)
1404 renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);
1405 if(!renderer)
1407 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1408 return 1;
1410 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1411 SDL_RenderFillRect(renderer, nullptr);
1412 SDL_RenderPresent(renderer);
1414 /* Open an audio device */
1415 int fileidx = 1;
1416 ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*
1418 ALCdevice *dev = NULL;
1419 if(argc > 3 && strcmp(argv[1], "-device") == 0)
1421 dev = alcOpenDevice(argv[2]);
1422 if(dev)
1424 fileidx = 3;
1425 return dev;
1427 std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;
1429 return alcOpenDevice(nullptr);
1430 }();
1431 ALCcontext *context = alcCreateContext(device, nullptr);
1432 if(!context || alcMakeContextCurrent(context) == ALC_FALSE)
1434 std::cerr<< "Failed to set up audio device" <<std::endl;
1435 if(context)
1436 alcDestroyContext(context);
1437 return 1;
1440 while(fileidx < argc && !movState)
1442 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1443 if(!movState->prepare()) movState = nullptr;
1445 if(!movState)
1447 std::cerr<< "Could not start a video" <<std::endl;
1448 return 1;
1450 movState->setTitle(screen);
1452 /* Default to going to the next movie at the end of one. */
1453 enum class EomAction {
1454 Next, Quit
1455 } eom_action = EomAction::Next;
1456 SDL_Event event;
1457 while(SDL_WaitEvent(&event) == 1)
1459 switch(event.type)
1461 case SDL_KEYDOWN:
1462 switch(event.key.keysym.sym)
1464 case SDLK_ESCAPE:
1465 movState->mQuit = true;
1466 eom_action = EomAction::Quit;
1467 break;
1469 case SDLK_n:
1470 movState->mQuit = true;
1471 eom_action = EomAction::Next;
1472 break;
1474 default:
1475 break;
1477 break;
1479 case SDL_WINDOWEVENT:
1480 switch(event.window.event)
1482 case SDL_WINDOWEVENT_RESIZED:
1483 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1484 SDL_RenderFillRect(renderer, nullptr);
1485 break;
1487 default:
1488 break;
1490 break;
1492 case SDL_QUIT:
1493 movState->mQuit = true;
1494 eom_action = EomAction::Quit;
1495 break;
1497 case FF_UPDATE_EVENT:
1498 reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
1499 screen, renderer
1501 break;
1503 case FF_REFRESH_EVENT:
1504 reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
1505 screen, renderer
1507 break;
1509 case FF_MOVIE_DONE_EVENT:
1510 if(eom_action != EomAction::Quit)
1512 movState = nullptr;
1513 while(fileidx < argc && !movState)
1515 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1516 if(!movState->prepare()) movState = nullptr;
1518 if(movState)
1520 movState->setTitle(screen);
1521 break;
1525 /* Nothing more to play. Shut everything down and quit. */
1526 movState = nullptr;
1528 alcMakeContextCurrent(nullptr);
1529 alcDestroyContext(context);
1530 alcCloseDevice(device);
1532 SDL_DestroyRenderer(renderer);
1533 renderer = nullptr;
1534 SDL_DestroyWindow(screen);
1535 screen = nullptr;
1537 SDL_Quit();
1538 exit(0);
1540 default:
1541 break;
1545 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
1546 return 1;