/*
 * An example showing how to play a stream sync'd to video, using ffmpeg.
 */

#include <condition_variable>

#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavutil/time.h"
#include "libavutil/pixfmt.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"

static const std::string AppName("alffplay");

static bool do_direct_out = false;
static bool has_latency_check = false;
static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;

#define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
#define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
#define AV_SYNC_THRESHOLD 0.01
#define AV_NOSYNC_THRESHOLD 10.0
#define SAMPLE_CORRECTION_MAX_DIFF 0.05
#define AUDIO_DIFF_AVG_NB 20
#define VIDEO_PICTURE_QUEUE_SIZE 16
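/* AV_SYNC_THRESHOLD caps the timing error tolerated before a video frame is
 * skipped or repeated, AV_NOSYNC_THRESHOLD is the error beyond which
 * correction is abandoned entirely, and SAMPLE_CORRECTION_MAX_DIFF bounds how
 * much audio may be skipped or duplicated in one update (all in seconds). */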
    FF_UPDATE_EVENT = SDL_USEREVENT,

    AV_SYNC_EXTERNAL_MASTER,

    DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER

    std::deque<AVPacket> mPackets;
    std::atomic<int> mTotalSize;
    std::atomic<bool> mFinished;

    std::condition_variable mCond;

    PacketQueue() : mTotalSize(0), mFinished(false)

    int put(const AVPacket *pkt);
    int peek(AVPacket *pkt, std::atomic<bool> &quit_var);
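    /* put() appends a packet, peek() waits for and copies the front packet,
     * pop() removes it, clear() drops everything, and finish() flags
     * end-of-stream so waiting consumers can stop. The parse thread is the
     * producer; the audio and video decoder threads are the consumers. */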
    AVCodecContext *mCodecCtx;

    /* Used for clock difference average computation */
    std::atomic<int> Clocks; /* In microseconds */

    /* Time (in seconds) of the next sample to be buffered */

    /* Decompressed sample frame, and swresample context for conversion */
    AVFrame *mDecodedFrame;
    struct SwrContext *mSwresCtx;
    /* Conversion format, for what gets fed to OpenAL */
    enum AVSampleFormat mDstSampleFmt;

    /* Storage of converted samples */
    int mSamplesLen; /* In samples */

    std::recursive_mutex mSrcMutex;

    ALuint mBuffers[AUDIO_BUFFER_QUEUE_SIZE];

    AudioState(MovieState *movie)
      : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr)
      , mDiff{{0}, 0.0, 0.0, 0.0, 0}, mCurrentPts(0.0), mDecodedFrame(nullptr)
      , mSwresCtx(nullptr), mDstChanLayout(0), mDstSampleFmt(AV_SAMPLE_FMT_NONE)
      , mSamples(nullptr), mSamplesLen(0), mSamplesPos(0), mSamplesMax(0)
      , mFormat(AL_NONE), mFrameSize(0), mSource(0), mBufferIdx(0)

        for(auto &buf : mBuffers)

        alDeleteSources(1, &mSource);
        alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);

        av_frame_free(&mDecodedFrame);
        swr_free(&mSwresCtx);

        avcodec_free_context(&mCodecCtx);

    int readAudio(uint8_t *samples, int length);
    AVCodecContext *mCodecCtx;

    double mFrameLastPts;
    double mFrameLastDelay;

    /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
    int64_t mCurrentPtsTime;

    /* Decompressed video frame, and swscale context for conversion */
    AVFrame *mDecodedFrame;
    struct SwsContext *mSwscaleCtx;

        int mWidth, mHeight; /* Logical image size (actual size may be larger) */
        std::atomic<bool> mUpdated;

          : mImage(nullptr), mWidth(0), mHeight(0), mUpdated(false), mPts(0.0)

            SDL_DestroyTexture(mImage);

    std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
    size_t mPictQSize, mPictQRead, mPictQWrite;
    std::mutex mPictQMutex;
    std::condition_variable mPictQCond;

    std::atomic<bool> mEOS;
    std::atomic<bool> mFinalUpdate;

    VideoState(MovieState *movie)
      : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr), mClock(0.0)
      , mFrameTimer(0.0), mFrameLastPts(0.0), mFrameLastDelay(0.0)
      , mCurrentPts(0.0), mCurrentPtsTime(0), mDecodedFrame(nullptr)
      , mSwscaleCtx(nullptr), mPictQSize(0), mPictQRead(0), mPictQWrite(0)
      , mFirstUpdate(true), mEOS(false), mFinalUpdate(false)

        sws_freeContext(mSwscaleCtx);
        mSwscaleCtx = nullptr;
        av_frame_free(&mDecodedFrame);
        avcodec_free_context(&mCodecCtx);

    static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
    void schedRefresh(int delay);
    void display(SDL_Window *screen, SDL_Renderer *renderer);
    void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
    void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
    int queuePicture(double pts);
    double synchronize(double pts);
    AVFormatContext *mFormatCtx;
    int mVideoStream, mAudioStream;

    int64_t mExternalClockBase;

    std::atomic<bool> mQuit;

    std::thread mParseThread;
    std::thread mAudioThread;
    std::thread mVideoThread;

    std::string mFilename;

    MovieState(std::string fname)
      : mFormatCtx(nullptr), mVideoStream(0), mAudioStream(0)
      , mAVSyncType(DEFAULT_AV_SYNC_TYPE), mExternalClockBase(0), mQuit(false)
      , mAudio(this), mVideo(this), mFilename(std::move(fname))

        if(mParseThread.joinable())

        avformat_close_input(&mFormatCtx);

    static int decode_interrupt_cb(void *ctx);

    void setTitle(SDL_Window *window);

    double getMasterClock();

    int streamComponentOpen(int stream_index);
int PacketQueue::put(const AVPacket *pkt)

    std::unique_lock<std::mutex> lock(mMutex);
    mPackets.push_back(AVPacket{});
    if(av_packet_ref(&mPackets.back(), pkt) != 0)

    mTotalSize += mPackets.back().size;

int PacketQueue::peek(AVPacket *pkt, std::atomic<bool> &quit_var)

    std::unique_lock<std::mutex> lock(mMutex);
    while(!quit_var.load())

        if(!mPackets.empty())

            if(av_packet_ref(pkt, &mPackets.front()) != 0)

void PacketQueue::pop()

    std::unique_lock<std::mutex> lock(mMutex);
    AVPacket *pkt = &mPackets.front();
    mTotalSize -= pkt->size;
    av_packet_unref(pkt);
    mPackets.pop_front();

void PacketQueue::clear()

    std::unique_lock<std::mutex> lock(mMutex);
    std::for_each(mPackets.begin(), mPackets.end(),
        [](AVPacket &pkt) { av_packet_unref(&pkt); }

void PacketQueue::finish()

    std::unique_lock<std::mutex> lock(mMutex);
double AudioState::getClock()

    std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
    /* The audio clock is the timestamp of the sample currently being heard.
     * It's based on 4 components:
     * 1 - The timestamp of the next sample to buffer (state->current_pts)
     * 2 - The length of the source's buffer queue
     * 3 - The offset OpenAL is currently at in the source (the first value
     *     from AL_SEC_OFFSET_LATENCY_SOFT)
     * 4 - The latency between OpenAL and the DAC (the second value from
     *     AL_SEC_OFFSET_LATENCY_SOFT)
     *
     * Subtracting the length of the source queue from the next sample's
     * timestamp gives the timestamp of the sample at start of the source
     * queue. Adding the source offset to that results in the timestamp for
     * OpenAL's current position, and subtracting the source latency from that
     * gives the timestamp of the sample currently at the DAC.
     */
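    /* Illustration (values assumed, not from the source): if the next sample
     * to buffer has a pts of 2.000s, 8 buffers of 100ms each are queued, the
     * source offset is 0.030s and the reported latency is 0.005s, then per
     * the computation described above the clock is
     * 2.000 - 0.800 + 0.030 - 0.005 = 1.225s. */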
    /* NOTE: The source state must be checked last, in case an underrun
     * occurs and the source stops between retrieving the offset+latency
     * and getting the state. */
    if(has_latency_check)

        alGetSourcedvSOFT(mSource, AL_SEC_OFFSET_LATENCY_SOFT, offset);
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);

        alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
        offset[0] = (double)ioffset / (double)mCodecCtx->sample_rate;

    alGetSourcei(mSource, AL_SOURCE_STATE, &status);

    /* If the source is AL_STOPPED, then there was an underrun and all
     * buffers are processed, so ignore the source queue. The audio thread
     * will put the source into an AL_INITIAL state and clear the queue
     * when it starts recovery. */
    if(status != AL_STOPPED)
        pts -= queue_size*((double)AUDIO_BUFFER_TIME/1000.0) - offset[0];
    if(status == AL_PLAYING)

    return std::max(pts, 0.0);
int AudioState::getSync()

    double diff, avg_diff, ref_clock;

    if(mMovie->mAVSyncType == AV_SYNC_AUDIO_MASTER)

    ref_clock = mMovie->getMasterClock();
    diff = ref_clock - getClock();

    if(!(fabs(diff) < AV_NOSYNC_THRESHOLD))

        /* Difference is TOO big; reset diff stuff */

    /* Accumulate the diffs */
    mDiff.Accum = mDiff.Accum*mDiff.AvgCoeff + diff;
    avg_diff = mDiff.Accum*(1.0 - mDiff.AvgCoeff);
    if(fabs(avg_diff) < mDiff.Threshold)

    /* Constrain the per-update difference to avoid exceedingly large skips */
    if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
        diff = SAMPLE_CORRECTION_MAX_DIFF;
    else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
        diff = -SAMPLE_CORRECTION_MAX_DIFF;
    return (int)(diff*mCodecCtx->sample_rate);
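/* getSync() returns a correction in sample frames: with diff clamped to
 * ±SAMPLE_CORRECTION_MAX_DIFF (0.05s), a 44.1kHz stream (rate assumed for
 * illustration) is adjusted by at most ±2205 samples per update, which
 * readAudio() then skips or duplicates. */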
int AudioState::decodeFrame()

    while(!mMovie->mQuit.load())

        while(!mMovie->mQuit.load())

            /* Get the next packet */

            if(mQueue.peek(&pkt, mMovie->mQuit) <= 0)

            int ret = avcodec_send_packet(mCodecCtx, &pkt);
            if(ret != AVERROR(EAGAIN))

                std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec<<std::endl;

            av_packet_unref(&pkt);
            if(ret == 0 || ret == AVERROR(EAGAIN))

        int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
        if(ret == AVERROR(EAGAIN))

        if(ret == AVERROR_EOF || ret < 0)

            std::cerr<< "Failed to decode frame: "<<ret<<std::endl;

        if(mDecodedFrame->nb_samples <= 0)

            av_frame_unref(mDecodedFrame);

        /* If provided, update w/ pts */
        int64_t pts = av_frame_get_best_effort_timestamp(mDecodedFrame);
        if(pts != AV_NOPTS_VALUE)
            mCurrentPts = av_q2d(mStream->time_base)*pts;

        if(mDecodedFrame->nb_samples > mSamplesMax)

                &mSamples, nullptr, mCodecCtx->channels,
                mDecodedFrame->nb_samples, mDstSampleFmt, 0

            mSamplesMax = mDecodedFrame->nb_samples;

        /* Return the amount of sample frames converted */
        int data_size = swr_convert(mSwresCtx, &mSamples, mDecodedFrame->nb_samples,
            (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples

        av_frame_unref(mDecodedFrame);
/* Duplicates the sample at in to out, count times. The frame size is a
 * multiple of the template type size. */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)

    const T *sample = reinterpret_cast<const T*>(in);
    T *dst = reinterpret_cast<T*>(out);
    if(frame_size == sizeof(T))
        std::fill_n(dst, count, *sample);

        /* NOTE: frame_size is a multiple of sizeof(T). */
        int type_mult = frame_size / sizeof(T);

        std::generate_n(dst, count*type_mult,
            [sample,type_mult,&i]() -> T
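/* readAudio() below consumes decoded frames and applies the getSync()
 * correction: a positive correction drops that many samples from the front
 * of the decoded data, while a negative one repeats the first sample (via
 * sample_dup) to pad the output, keeping the audio clock aligned. */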
int AudioState::readAudio(uint8_t *samples, int length)

    int sample_skip = getSync();

    /* Read the next chunk of data, refill the buffer, and queue it */
    length /= mFrameSize;
    while(audio_size < length)

        if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)

            int frame_len = decodeFrame();
            if(frame_len <= 0) break;

            mSamplesLen = frame_len;
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            sample_skip -= mSamplesPos;

            mCurrentPts += (double)mSamplesPos / (double)mCodecCtx->sample_rate;

        int rem = length - audio_size;

            int len = mSamplesLen - mSamplesPos;
            if(rem > len) rem = len;
            memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);

            rem = std::min(rem, -mSamplesPos);

            /* Add samples by copying the first sample */
            if((mFrameSize&7) == 0)
                sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&3) == 0)
                sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&1) == 0)
                sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
            else
                sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);

        mCurrentPts += (double)rem / mCodecCtx->sample_rate;
        samples += rem*mFrameSize;

    if(audio_size < length && audio_size > 0)

        int rem = length - audio_size;
        std::fill_n(samples, rem*mFrameSize,
            (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
        mCurrentPts += (double)rem / mCodecCtx->sample_rate;

    return audio_size * mFrameSize;
int AudioState::handler()

    std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
    /* Find a suitable format for OpenAL. */
    if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)

        mDstSampleFmt = AV_SAMPLE_FMT_U8;

        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)

            mDstChanLayout = mCodecCtx->channel_layout;

        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)

            mDstChanLayout = mCodecCtx->channel_layout;

        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)

            mDstChanLayout = mCodecCtx->channel_layout;
            mFormat = AL_FORMAT_MONO8;

            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFormat = AL_FORMAT_STEREO8;

    if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
       alIsExtensionPresent("AL_EXT_FLOAT32"))

        mDstSampleFmt = AV_SAMPLE_FMT_FLT;

        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)

            mDstChanLayout = mCodecCtx->channel_layout;

        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)

            mDstChanLayout = mCodecCtx->channel_layout;

        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)

            mDstChanLayout = mCodecCtx->channel_layout;
            mFormat = AL_FORMAT_MONO_FLOAT32;

            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFormat = AL_FORMAT_STEREO_FLOAT32;

        mDstSampleFmt = AV_SAMPLE_FMT_S16;

        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)

            mDstChanLayout = mCodecCtx->channel_layout;

        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)

            mDstChanLayout = mCodecCtx->channel_layout;

        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)

            mDstChanLayout = mCodecCtx->channel_layout;
            mFormat = AL_FORMAT_MONO16;

            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFormat = AL_FORMAT_STEREO16;
    ALsizei buffer_len = mCodecCtx->sample_rate * AUDIO_BUFFER_TIME / 1000 *
    void *samples = av_malloc(buffer_len);
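    /* With the 100ms AUDIO_BUFFER_TIME, a 48kHz stream (rate assumed for
     * illustration) yields 4800 sample frames per buffer, so the full
     * 8-buffer queue holds roughly 0.8 seconds of audio. */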
    if(!(mDecodedFrame=av_frame_alloc()))

        std::cerr<< "Failed to allocate audio frame" <<std::endl;
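    /* The swresample context set up below converts from the decoder's native
     * sample format and channel layout to the mDstSampleFmt/mDstChanLayout
     * pair chosen above, so every buffer handed to OpenAL has a fixed,
     * supported format. */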
    mSwresCtx = swr_alloc_set_opts(nullptr,
        mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
        mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
            (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
        mCodecCtx->sample_fmt, mCodecCtx->sample_rate,

    if(!mSwresCtx || swr_init(mSwresCtx) != 0)

        std::cerr<< "Failed to initialize audio converter" <<std::endl;

    alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);
    alGenSources(1, &mSource);

        if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
            std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;

            alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
            std::cout<< "Direct out enabled" <<std::endl;
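    /* Streaming loop: unqueue any processed buffers, refill them from
     * readAudio() and queue them again, restart the source after an underrun,
     * and sleep for about a third of a buffer's duration between passes. */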
    while(alGetError() == AL_NO_ERROR && !mMovie->mQuit.load())

        /* First remove any processed buffers. */

        alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);

            std::array<ALuint,AUDIO_BUFFER_QUEUE_SIZE> tmp;
            alSourceUnqueueBuffers(mSource, processed, tmp.data());

        /* Refill the buffer queue. */

        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        while(queued < AUDIO_BUFFER_QUEUE_SIZE)

            /* Read the next chunk of data, fill the buffer, and queue it */
            audio_size = readAudio(reinterpret_cast<uint8_t*>(samples), buffer_len);
            if(audio_size <= 0) break;

            ALuint bufid = mBuffers[mBufferIdx++];
            mBufferIdx %= AUDIO_BUFFER_QUEUE_SIZE;

            alBufferData(bufid, mFormat, samples, audio_size, mCodecCtx->sample_rate);
            alSourceQueueBuffers(mSource, 1, &bufid);

        /* Check that the source is playing. */

        alGetSourcei(mSource, AL_SOURCE_STATE, &state);
        if(state == AL_STOPPED)

            /* AL_STOPPED means there was an underrun. Rewind the source to get
             * it back into an AL_INITIAL state. */
            alSourceRewind(mSource);

        /* (re)start the source if needed, and wait for a buffer to finish */
        if(state != AL_PLAYING && state != AL_PAUSED)
            alSourcePlay(mSource);
        SDL_Delay(AUDIO_BUFFER_TIME / 3);

    alSourceRewind(mSource);
    alSourcei(mSource, AL_BUFFER, 0);

    av_frame_free(&mDecodedFrame);
    swr_free(&mSwresCtx);
double VideoState::getClock()

    double delta = (av_gettime() - mCurrentPtsTime) / 1000000.0;
    return mCurrentPts + delta;

Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)

    evt.user.type = FF_REFRESH_EVENT;
    evt.user.data1 = opaque;

    return 0; /* 0 means stop timer */
/* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
void VideoState::schedRefresh(int delay)

    SDL_AddTimer(delay, sdl_refresh_timer_cb, this);

/* Called by VideoState::refreshTimer to display the next video frame. */
void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)

    Picture *vp = &mPictQ[mPictQRead];

    if(mCodecCtx->sample_aspect_ratio.num == 0)

        aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /

    if(aspect_ratio <= 0.0f)
        aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;

    SDL_GetWindowSize(screen, &win_w, &win_h);

        w = ((int)rint(h * aspect_ratio) + 3) & ~3;

        h = ((int)rint(w / aspect_ratio) + 3) & ~3;

    SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
    SDL_Rect dst_rect{ x, y, w, h };
    SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
    SDL_RenderPresent(renderer);
/* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
 * was created. It handles the display of the next decoded video frame (if not
 * falling behind), and sets up the timer for the following video frame. */
void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)

            std::unique_lock<std::mutex>(mPictQMutex).unlock();
            mPictQCond.notify_all();

    std::unique_lock<std::mutex> lock(mPictQMutex);

            mPictQCond.notify_all();

    Picture *vp = &mPictQ[mPictQRead];
    mCurrentPts = vp->mPts;
    mCurrentPtsTime = av_gettime();

    /* Get delay using the frame pts and the pts from last frame. */
    double delay = vp->mPts - mFrameLastPts;
    if(delay <= 0 || delay >= 1.0)

        /* If incorrect delay, use previous one. */
        delay = mFrameLastDelay;

    /* Save for next frame. */
    mFrameLastDelay = delay;
    mFrameLastPts = vp->mPts;

    /* Update delay to sync to clock if not master source. */
    if(mMovie->mAVSyncType != AV_SYNC_VIDEO_MASTER)

        double ref_clock = mMovie->getMasterClock();
        double diff = vp->mPts - ref_clock;

        /* Skip or repeat the frame. Take delay into account. */
        double sync_threshold = std::min(delay, AV_SYNC_THRESHOLD);
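        /* For example (values assumed): with a 40ms frame delay the threshold
         * is min(0.040, AV_SYNC_THRESHOLD) = 10ms, so only clock errors larger
         * than 10ms trigger the skip/repeat adjustment below. */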
        if(fabs(diff) < AV_NOSYNC_THRESHOLD)

            if(diff <= -sync_threshold)

            else if(diff >= sync_threshold)

    mFrameTimer += delay;
    /* Compute the REAL delay. */
    double actual_delay = mFrameTimer - (av_gettime() / 1000000.0);
    if(!(actual_delay >= 0.010))

        /* We don't have time to handle this picture, just skip to the next one. */
        mPictQRead = (mPictQRead+1)%mPictQ.size();

    schedRefresh((int)(actual_delay*1000.0 + 0.5));

    /* Show the picture! */
    display(screen, renderer);

    /* Update queue for next picture. */
    mPictQRead = (mPictQRead+1)%mPictQ.size();

    mPictQCond.notify_all();
/* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
 * main thread where the renderer was created. */
void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)

    Picture *vp = &mPictQ[mPictQWrite];
    bool fmt_updated = false;

    /* allocate or resize the buffer! */
    if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)

        SDL_DestroyTexture(vp->mImage);
        vp->mImage = SDL_CreateTexture(
            renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
            mCodecCtx->coded_width, mCodecCtx->coded_height

            std::cerr<< "Failed to create YV12 texture!" <<std::endl;
        vp->mWidth = mCodecCtx->width;
        vp->mHeight = mCodecCtx->height;
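        /* The texture is allocated at the codec's coded size (which may be
         * padded), while mWidth/mHeight track the logical display size; only
         * that region is copied to the screen in display(). */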
    if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)

        /* For the first update, set the window size to the video size. */
        mFirstUpdate = false;

        if(mCodecCtx->sample_aspect_ratio.den != 0)

            double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
            if(aspect_ratio >= 1.0)
                w = (int)(w*aspect_ratio + 0.5);
            else if(aspect_ratio > 0.0)
                h = (int)(h/aspect_ratio + 0.5);

        SDL_SetWindowSize(screen, w, h);

        AVFrame *frame = mDecodedFrame;
        void *pixels = nullptr;

        if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
            SDL_UpdateYUVTexture(vp->mImage, nullptr,
                frame->data[0], frame->linesize[0],
                frame->data[1], frame->linesize[1],
                frame->data[2], frame->linesize[2]

        else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
            std::cerr<< "Failed to lock texture" <<std::endl;

            // Convert the image into YUV format that SDL uses
            int coded_w = mCodecCtx->coded_width;
            int coded_h = mCodecCtx->coded_height;
            int w = mCodecCtx->width;
            int h = mCodecCtx->height;
            if(!mSwscaleCtx || fmt_updated)

                sws_freeContext(mSwscaleCtx);
                mSwscaleCtx = sws_getContext(
                    w, h, mCodecCtx->pix_fmt,
                    w, h, AV_PIX_FMT_YUV420P, 0,
                    nullptr, nullptr, nullptr

            /* point pict at the queue */
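            /* pict_data describes a planar YUV420 image inside the locked
             * texture: a full-resolution Y plane of coded_w*coded_h bytes,
             * followed by quarter-size U and V planes, each with a line size
             * of half the texture pitch. */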
            uint8_t *pict_data[3];
            pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
            pict_data[1] = pict_data[0] + coded_w*coded_h;
            pict_data[2] = pict_data[1] + coded_w*coded_h/4;

            int pict_linesize[3];
            pict_linesize[0] = pitch;
            pict_linesize[1] = pitch / 2;
            pict_linesize[2] = pitch / 2;

            sws_scale(mSwscaleCtx, (const uint8_t**)frame->data,
                frame->linesize, 0, h, pict_data, pict_linesize);
            SDL_UnlockTexture(vp->mImage);

    std::unique_lock<std::mutex> lock(mPictQMutex);
    vp->mUpdated = true;

    mPictQCond.notify_one();
int VideoState::queuePicture(double pts)

    /* Wait until we have space for a new pic */
    std::unique_lock<std::mutex> lock(mPictQMutex);
    while(mPictQSize >= mPictQ.size() && !mMovie->mQuit.load())
        mPictQCond.wait(lock);

    if(mMovie->mQuit.load())

    Picture *vp = &mPictQ[mPictQWrite];

    /* We have to create/update the picture in the main thread */
    vp->mUpdated = false;

    evt.user.type = FF_UPDATE_EVENT;
    evt.user.data1 = this;
    SDL_PushEvent(&evt);

    /* Wait until the picture is updated. */

    while(!vp->mUpdated && !mMovie->mQuit.load())
        mPictQCond.wait(lock);
    if(mMovie->mQuit.load())

    mPictQWrite = (mPictQWrite+1)%mPictQ.size();
double VideoState::synchronize(double pts)

    if(pts == 0.0) /* if we aren't given a pts, set it to the clock */

    else /* if we have pts, set video clock to it */

    /* update the video clock */
    frame_delay = av_q2d(mCodecCtx->time_base);
    /* if we are repeating a frame, adjust clock accordingly */
    frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
    mClock += frame_delay;
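    /* For example (an assumed 25fps stream): time_base gives a frame_delay of
     * 0.04s, and with repeat_pict == 1 the clock advances by
     * 0.04 + 1*(0.04*0.5) = 0.06s for that frame. */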
int VideoState::handler()

    mDecodedFrame = av_frame_alloc();
    while(!mMovie->mQuit)

        while(!mMovie->mQuit)

            if(mQueue.peek(&packet, mMovie->mQuit) <= 0)

            int ret = avcodec_send_packet(mCodecCtx, &packet);
            if(ret != AVERROR(EAGAIN))

                std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec<<std::endl;

            av_packet_unref(&packet);
            if(ret == 0 || ret == AVERROR(EAGAIN))

        /* Decode video frame */
        int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
        if(ret == AVERROR(EAGAIN))

            std::cerr<< "Failed to decode frame: "<<ret<<std::endl;

        double pts = synchronize(
            av_q2d(mStream->time_base) * av_frame_get_best_effort_timestamp(mDecodedFrame)

        if(queuePicture(pts) < 0)

        av_frame_unref(mDecodedFrame);

    av_frame_free(&mDecodedFrame);

    std::unique_lock<std::mutex> lock(mPictQMutex);

    while(!mFinalUpdate)
        mPictQCond.wait(lock);

int MovieState::decode_interrupt_cb(void *ctx)

    return reinterpret_cast<MovieState*>(ctx)->mQuit;
bool MovieState::prepare()

    mFormatCtx = avformat_alloc_context();
    mFormatCtx->interrupt_callback.callback = decode_interrupt_cb;
    mFormatCtx->interrupt_callback.opaque = this;
    if(avio_open2(&mFormatCtx->pb, mFilename.c_str(), AVIO_FLAG_READ,
                  &mFormatCtx->interrupt_callback, nullptr))

        std::cerr<< "Failed to open "<<mFilename <<std::endl;

    /* Open movie file */
    if(avformat_open_input(&mFormatCtx, mFilename.c_str(), nullptr, nullptr) != 0)

        std::cerr<< "Failed to open "<<mFilename <<std::endl;

    /* Retrieve stream information */
    if(avformat_find_stream_info(mFormatCtx, nullptr) < 0)

        std::cerr<< mFilename<<": failed to find stream info" <<std::endl;

    mVideo.schedRefresh(40);

    mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
void MovieState::setTitle(SDL_Window *window)

    auto pos1 = mFilename.rfind('/');
    auto pos2 = mFilename.rfind('\\');
    auto fpos = ((pos1 == std::string::npos) ? pos2 :
                 (pos2 == std::string::npos) ? pos1 :
                 std::max(pos1, pos2)) + 1;
    SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());

double MovieState::getClock()

    return (av_gettime()-mExternalClockBase) / 1000000.0;
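    /* The external clock is simply wall-clock time since playback started,
     * measured from mExternalClockBase (which parse_handler sets 50ms in the
     * future to give the decoders a head start). */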
double MovieState::getMasterClock()

    if(mAVSyncType == AV_SYNC_VIDEO_MASTER)
        return mVideo.getClock();
    if(mAVSyncType == AV_SYNC_AUDIO_MASTER)
        return mAudio.getClock();

int MovieState::streamComponentOpen(int stream_index)

    if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)

    /* Get a pointer to the codec context for the stream, and open the
     * associated codec. */
    AVCodecContext *avctx = avcodec_alloc_context3(nullptr);
    if(!avctx) return -1;

    if(avcodec_parameters_to_context(avctx, mFormatCtx->streams[stream_index]->codecpar))

        avcodec_free_context(&avctx);

    AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
    if(!codec || avcodec_open2(avctx, codec, nullptr) < 0)

        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
            << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        avcodec_free_context(&avctx);
    /* Initialize and start the media type handler */
    switch(avctx->codec_type)

    case AVMEDIA_TYPE_AUDIO:
        mAudioStream = stream_index;
        mAudio.mStream = mFormatCtx->streams[stream_index];
        mAudio.mCodecCtx = avctx;

        /* Averaging filter for audio sync */
        mAudio.mDiff.AvgCoeff = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        /* Correct audio only if larger error than this */
        mAudio.mDiff.Threshold = 0.050/* 50 ms */;

        mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);

    case AVMEDIA_TYPE_VIDEO:
        mVideoStream = stream_index;
        mVideo.mStream = mFormatCtx->streams[stream_index];
        mVideo.mCodecCtx = avctx;

        mVideo.mCurrentPtsTime = av_gettime();
        mVideo.mFrameTimer = (double)mVideo.mCurrentPtsTime / 1000000.0;
        mVideo.mFrameLastDelay = 40e-3;

        mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);

        avcodec_free_context(&avctx);
int MovieState::parse_handler()

    int video_index = -1;
    int audio_index = -1;

    /* Dump information about file onto standard error */
    av_dump_format(mFormatCtx, 0, mFilename.c_str(), 0);

    /* Find the first video and audio streams */
    for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)

        if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)

        else if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)

    /* Start the external clock in 50ms, to give the audio and video
     * components time to start without needing to skip ahead. */
    mExternalClockBase = av_gettime() + 50000;
    if(audio_index >= 0)
        streamComponentOpen(audio_index);
    if(video_index >= 0)
        streamComponentOpen(video_index);

    if(mVideoStream < 0 && mAudioStream < 0)

        std::cerr<< mFilename<<": could not open codecs" <<std::endl;

    /* Main packet handling loop */
    while(!mQuit.load())

        if(mAudio.mQueue.mTotalSize + mVideo.mQueue.mTotalSize >= MAX_QUEUE_SIZE)

            std::this_thread::sleep_for(std::chrono::milliseconds(10));
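        /* Backpressure: once roughly 15MB of compressed packets are queued
         * between the two streams, reading pauses briefly so the decoder
         * threads can catch up before more data is pulled from the demuxer. */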
        if(av_read_frame(mFormatCtx, &packet) < 0)

        /* Copy the packet in the queue it's meant for. */
        if(packet.stream_index == mVideoStream)
            mVideo.mQueue.put(&packet);
        else if(packet.stream_index == mAudioStream)
            mAudio.mQueue.put(&packet);
        av_packet_unref(&packet);

    mVideo.mQueue.finish();
    mAudio.mQueue.finish();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);

    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);
int main(int argc, char *argv[])

    std::unique_ptr<MovieState> movState;

        std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;

    /* Register all formats and codecs */

    /* Initialize networking protocols */
    avformat_network_init();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
        std::cerr<< "Could not initialize SDL - "<<SDL_GetError() <<std::endl;
    /* Make a window to put our video */
    SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);

        std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;

    /* Make a renderer to handle the texture image surface and rendering. */
    SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);

        SDL_RendererInfo rinf{};

        /* Make sure the renderer supports IYUV textures. If not, fall back to
         * a software renderer. */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)

            for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
                ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);

            std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
            SDL_DestroyRenderer(renderer);

        renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);

        std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;

    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderFillRect(renderer, nullptr);
    SDL_RenderPresent(renderer);
    /* Open an audio device */

    ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*

        ALCdevice *dev = NULL;
        if(argc > 3 && strcmp(argv[1], "-device") == 0)

            dev = alcOpenDevice(argv[2]);

                std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;

        return alcOpenDevice(nullptr);

    ALCcontext *context = alcCreateContext(device, nullptr);
    if(!context || alcMakeContextCurrent(context) == ALC_FALSE)

        std::cerr<< "Failed to set up audio device" <<std::endl;

        alcDestroyContext(context);

    const ALCchar *name = nullptr;
    if(alcIsExtensionPresent(device, "ALC_ENUMERATE_ALL_EXT"))
        name = alcGetString(device, ALC_ALL_DEVICES_SPECIFIER);
    if(!name || alcGetError(device) != AL_NO_ERROR)
        name = alcGetString(device, ALC_DEVICE_SPECIFIER);
    std::cout<< "Opened \""<<name<<"\"" <<std::endl;
    if(fileidx < argc && strcmp(argv[fileidx], "-direct") == 0)

        do_direct_out = true;

    while(fileidx < argc && !movState)

        movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
        if(!movState->prepare()) movState = nullptr;

        std::cerr<< "Could not start a video" <<std::endl;

    movState->setTitle(screen);

    /* Default to going to the next movie at the end of one. */
    enum class EomAction {
        Next, Quit
    } eom_action = EomAction::Next;
    while(SDL_WaitEvent(&event) == 1)

            switch(event.key.keysym.sym)

                movState->mQuit = true;
                eom_action = EomAction::Quit;

                movState->mQuit = true;
                eom_action = EomAction::Next;

        case SDL_WINDOWEVENT:
            switch(event.window.event)

            case SDL_WINDOWEVENT_RESIZED:
                SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                SDL_RenderFillRect(renderer, nullptr);

            movState->mQuit = true;
            eom_action = EomAction::Quit;

        case FF_UPDATE_EVENT:
            reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(

        case FF_REFRESH_EVENT:
            reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(

        case FF_MOVIE_DONE_EVENT:
            if(eom_action != EomAction::Quit)

                while(fileidx < argc && !movState)

                    movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
                    if(!movState->prepare()) movState = nullptr;

                movState->setTitle(screen);

            /* Nothing more to play. Shut everything down and quit. */

            alcMakeContextCurrent(nullptr);
            alcDestroyContext(context);
            alcCloseDevice(device);

            SDL_DestroyRenderer(renderer);

            SDL_DestroyWindow(screen);

    std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;