/* An example showing how to play a stream sync'd to video, using ffmpeg. */
#include <condition_variable>

#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavutil/time.h"
#include "libavutil/pixfmt.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
static const std::string AppName("alffplay");

static bool do_direct_out = false;
static bool has_latency_check = false;
static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;
#define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
#define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
#define AV_SYNC_THRESHOLD 0.01
#define AV_NOSYNC_THRESHOLD 10.0
#define SAMPLE_CORRECTION_MAX_DIFF 0.05
#define AUDIO_DIFF_AVG_NB 20
#define VIDEO_PICTURE_QUEUE_SIZE 16
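/* With these defaults the OpenAL source holds up to
 * AUDIO_BUFFER_QUEUE_SIZE * AUDIO_BUFFER_TIME = 8 * 100ms = 800ms of decoded
 * audio, while the demuxer keeps at most 15MB of compressed packets queued
 * (a rough illustration of the numbers above, not a tuning recommendation). */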
enum {
    FF_UPDATE_EVENT = SDL_USEREVENT,
    FF_REFRESH_EVENT,
    FF_MOVIE_DONE_EVENT
};

enum {
    AV_SYNC_AUDIO_MASTER,
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_MASTER,

    DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER
};
struct PacketQueue {
    std::deque<AVPacket> mPackets;
    std::atomic<int> mTotalSize;
    std::atomic<bool> mFinished;
    std::mutex mMutex;
    std::condition_variable mCond;

    PacketQueue() : mTotalSize(0), mFinished(false)
    { }

    int put(const AVPacket *pkt);
    int peek(AVPacket *pkt, std::atomic<bool> &quit_var);
    void pop();

    void clear();
    void finish();
};
struct MovieState;

struct AudioState {
    AVCodecContext *mCodecCtx;

    /* Used for clock difference average computation */
    struct {
        std::atomic<int> Clocks; /* In microseconds */
        double Accum;
        double AvgCoeff;
        double Threshold;
    } mDiff;

    /* Time (in seconds) of the next sample to be buffered */
    double mCurrentPts;

    /* Decompressed sample frame, and swresample context for conversion */
    AVFrame *mDecodedFrame;
    struct SwrContext *mSwresCtx;

    /* Conversion format, for what gets fed to Alure */
    enum AVSampleFormat mDstSampleFmt;

    /* Storage of converted samples */
    int mSamplesLen; /* In samples */

    std::recursive_mutex mSrcMutex;

    ALuint mBuffers[AUDIO_BUFFER_QUEUE_SIZE];

    AudioState(MovieState *movie)
      : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr)
      , mDiff{{0}, 0.0, 0.0, 0.0, 0}, mCurrentPts(0.0), mDecodedFrame(nullptr)
      , mSwresCtx(nullptr), mDstChanLayout(0), mDstSampleFmt(AV_SAMPLE_FMT_NONE)
      , mSamples(nullptr), mSamplesLen(0), mSamplesPos(0), mSamplesMax(0)
      , mFormat(AL_NONE), mFrameSize(0), mSource(0), mBufferIdx(0)
    {
        for(auto &buf : mBuffers)
            buf = 0;
    }
    ~AudioState()
    {
        alDeleteSources(1, &mSource);
        alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);

        av_frame_free(&mDecodedFrame);
        swr_free(&mSwresCtx);

        avcodec_free_context(&mCodecCtx);
    }

    double getClock();
    int getSync();

    int decodeFrame();
    int readAudio(uint8_t *samples, int length);

    int handler();
};
struct VideoState {
    AVCodecContext *mCodecCtx;

    double mFrameLastPts;
    double mFrameLastDelay;
    /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
    int64_t mCurrentPtsTime;

    /* Decompressed video frame, and swscale context for conversion */
    AVFrame *mDecodedFrame;
    struct SwsContext *mSwscaleCtx;

    struct Picture {
        SDL_Texture *mImage;
        int mWidth, mHeight; /* Logical image size (actual size may be larger) */
        std::atomic<bool> mUpdated;
        double mPts;

        Picture()
          : mImage(nullptr), mWidth(0), mHeight(0), mUpdated(false), mPts(0.0)
        { }
        ~Picture()
        {
            if(mImage)
                SDL_DestroyTexture(mImage);
        }
    };

    std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
    size_t mPictQSize, mPictQRead, mPictQWrite;
    std::mutex mPictQMutex;
    std::condition_variable mPictQCond;

    std::atomic<bool> mEOS;
    std::atomic<bool> mFinalUpdate;

    VideoState(MovieState *movie)
      : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr), mClock(0.0)
      , mFrameTimer(0.0), mFrameLastPts(0.0), mFrameLastDelay(0.0)
      , mCurrentPts(0.0), mCurrentPtsTime(0), mDecodedFrame(nullptr)
      , mSwscaleCtx(nullptr), mPictQSize(0), mPictQRead(0), mPictQWrite(0)
      , mFirstUpdate(true), mEOS(false), mFinalUpdate(false)
    { }
    ~VideoState()
    {
        sws_freeContext(mSwscaleCtx);
        mSwscaleCtx = nullptr;
        av_frame_free(&mDecodedFrame);
        avcodec_free_context(&mCodecCtx);
    }

    static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
    void schedRefresh(int delay);
    void display(SDL_Window *screen, SDL_Renderer *renderer);
    void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
    void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
    int queuePicture(double pts);
    double synchronize(double pts);

    int handler();
};
struct MovieState {
    AVFormatContext *mFormatCtx;
    int mVideoStream, mAudioStream;

    int64_t mExternalClockBase;

    std::atomic<bool> mQuit;

    std::thread mParseThread;
    std::thread mAudioThread;
    std::thread mVideoThread;

    std::string mFilename;

    MovieState(std::string fname)
      : mFormatCtx(nullptr), mVideoStream(0), mAudioStream(0)
      , mAVSyncType(DEFAULT_AV_SYNC_TYPE), mExternalClockBase(0), mQuit(false)
      , mAudio(this), mVideo(this), mFilename(std::move(fname))
    { }
    ~MovieState()
    {
        if(mParseThread.joinable())
            mParseThread.join();
        avformat_close_input(&mFormatCtx);
    }

    static int decode_interrupt_cb(void *ctx);
    bool prepare();
    void setTitle(SDL_Window *window);

    double getClock();
    double getMasterClock();

    int streamComponentOpen(int stream_index);
    int parse_handler();
};
int PacketQueue::put(const AVPacket *pkt)
{
    std::unique_lock<std::mutex> lock(mMutex);
    mPackets.push_back(AVPacket{});
    if(av_packet_ref(&mPackets.back(), pkt) != 0)
    {
        mPackets.pop_back();
        return -1;
    }
    mTotalSize += mPackets.back().size;
    lock.unlock();
    mCond.notify_one();
    return 0;
}
int PacketQueue::peek(AVPacket *pkt, std::atomic<bool> &quit_var)
{
    std::unique_lock<std::mutex> lock(mMutex);
    while(!quit_var.load())
    {
        if(!mPackets.empty())
        {
            if(av_packet_ref(pkt, &mPackets.front()) != 0)
                return -1;
            return 1;
        }
        if(mFinished.load())
            return 0;
        mCond.wait(lock);
    }
    return -1;
}
void PacketQueue::pop()
{
    std::unique_lock<std::mutex> lock(mMutex);
    AVPacket *pkt = &mPackets.front();
    mTotalSize -= pkt->size;
    av_packet_unref(pkt);
    mPackets.pop_front();
}
void PacketQueue::clear()
{
    std::unique_lock<std::mutex> lock(mMutex);
    std::for_each(mPackets.begin(), mPackets.end(),
        [](AVPacket &pkt) { av_packet_unref(&pkt); }
    );
    mPackets.clear();
    mTotalSize = 0;
}
void PacketQueue::finish()
{
    std::unique_lock<std::mutex> lock(mMutex);
    mFinished = true;
    mCond.notify_all();
}
double AudioState::getClock()
{
    double pts;
    ALdouble offset[2];
    ALint queue_size;
    ALint status;

    std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
    /* The audio clock is the timestamp of the sample currently being heard.
     * It's based on 4 components:
     * 1 - The timestamp of the next sample to buffer (state->current_pts)
     * 2 - The length of the source's buffer queue
     * 3 - The offset OpenAL is currently at in the source (the first value
     *     from AL_SEC_OFFSET_LATENCY_SOFT)
     * 4 - The latency between OpenAL and the DAC (the second value from
     *     AL_SEC_OFFSET_LATENCY_SOFT)
     *
     * Subtracting the length of the source queue from the next sample's
     * timestamp gives the timestamp of the sample at start of the source
     * queue. Adding the source offset to that results in the timestamp for
     * OpenAL's current position, and subtracting the source latency from that
     * gives the timestamp of the sample currently at the DAC.
     */
    pts = mCurrentPts;

    /* NOTE: The source state must be checked last, in case an underrun
     * occurs and the source stops between retrieving the offset+latency
     * and getting the state. */
    if(has_latency_check)
    {
        alGetSourcedvSOFT(mSource, AL_SEC_OFFSET_LATENCY_SOFT, offset);
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
    }
    else
    {
        ALint ioffset;
        alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
        offset[0] = (double)ioffset / (double)mCodecCtx->sample_rate;
        offset[1] = 0.0;
    }
    alGetSourcei(mSource, AL_SOURCE_STATE, &status);

    /* If the source is AL_STOPPED, then there was an underrun and all
     * buffers are processed, so ignore the source queue. The audio thread
     * will put the source into an AL_INITIAL state and clear the queue
     * when it starts recovery. */
    if(status != AL_STOPPED)
        pts -= queue_size*((double)AUDIO_BUFFER_TIME/1000.0) - offset[0];
    if(status == AL_PLAYING)
        pts -= offset[1];

    return std::max(pts, 0.0);
}
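/* Rough worked example of the clock math in getClock() above (illustrative
 * numbers, not from a real stream): with mCurrentPts = 5.0s, 4 queued buffers
 * of 100ms each, a source offset of 0.025s and a reported latency of 0.015s,
 * the result is 5.0 - 4*0.1 + 0.025 - 0.015 = 4.61s. */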
int AudioState::getSync()
{
    double diff, avg_diff, ref_clock;

    if(mMovie->mAVSyncType == AV_SYNC_AUDIO_MASTER)
        return 0;

    ref_clock = mMovie->getMasterClock();
    diff = ref_clock - getClock();

    if(!(fabs(diff) < AV_NOSYNC_THRESHOLD))
    {
        /* Difference is TOO big; reset diff stuff */
        mDiff.Accum = 0.0;
        return 0;
    }

    /* Accumulate the diffs */
    mDiff.Accum = mDiff.Accum*mDiff.AvgCoeff + diff;
    avg_diff = mDiff.Accum*(1.0 - mDiff.AvgCoeff);
    if(fabs(avg_diff) < mDiff.Threshold)
        return 0;

    /* Constrain the per-update difference to avoid exceedingly large skips */
    if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
        diff = SAMPLE_CORRECTION_MAX_DIFF;
    else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
        diff = -SAMPLE_CORRECTION_MAX_DIFF;
    return (int)(diff*mCodecCtx->sample_rate);
}
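/* The value returned above is a correction in sample frames: a positive count
 * means the audio is behind the master clock and that many frames should be
 * skipped, a negative count means it is ahead and frames need to be repeated.
 * E.g. a diff clamped to 0.05s on a 44100Hz stream yields
 * (int)(0.05*44100) = 2205 frames. */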
int AudioState::decodeFrame()
{
    while(!mMovie->mQuit.load())
    {
        AVPacket pkt{};
        while(!mMovie->mQuit.load())
        {
            /* Get the next packet */
            if(mQueue.peek(&pkt, mMovie->mQuit) <= 0)
                return -1;

            int ret = avcodec_send_packet(mCodecCtx, &pkt);
            if(ret != AVERROR(EAGAIN))
            {
                if(ret < 0)
                    std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
                mQueue.pop();
            }
            av_packet_unref(&pkt);
            if(ret == 0 || ret == AVERROR(EAGAIN))
                break;
        }

        int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
        if(ret == AVERROR(EAGAIN))
            continue;
        if(ret == AVERROR_EOF || ret < 0)
        {
            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
            return 0;
        }

        if(mDecodedFrame->nb_samples <= 0)
        {
            av_frame_unref(mDecodedFrame);
            continue;
        }

        /* If provided, update w/ pts */
        int64_t pts = av_frame_get_best_effort_timestamp(mDecodedFrame);
        if(pts != AV_NOPTS_VALUE)
            mCurrentPts = av_q2d(mStream->time_base)*pts;

        if(mDecodedFrame->nb_samples > mSamplesMax)
        {
            av_freep(&mSamples);
            av_samples_alloc(
                &mSamples, nullptr, mCodecCtx->channels,
                mDecodedFrame->nb_samples, mDstSampleFmt, 0
            );
            mSamplesMax = mDecodedFrame->nb_samples;
        }
        /* Return the amount of sample frames converted */
        int data_size = swr_convert(mSwresCtx, &mSamples, mDecodedFrame->nb_samples,
            (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
        );

        av_frame_unref(mDecodedFrame);
        return data_size;
    }

    return 0;
}
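/* Decoding follows FFmpeg's send/receive API: avcodec_send_packet() feeds
 * compressed packets in, avcodec_receive_frame() pulls decoded frames out,
 * and AVERROR(EAGAIN) from either call just means the other side of the codec
 * needs servicing before more progress can be made. */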
/* Duplicates the sample at in to out, count times. The frame size is a
 * multiple of the template type size.
 */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
{
    const T *sample = reinterpret_cast<const T*>(in);
    T *dst = reinterpret_cast<T*>(out);
    if(frame_size == sizeof(T))
        std::fill_n(dst, count, *sample);
    else
    {
        /* NOTE: frame_size is a multiple of sizeof(T). */
        int type_mult = frame_size / sizeof(T);
        int i = 0;
        std::generate_n(dst, count*type_mult,
            [sample,type_mult,&i]() -> T
            {
                T ret = sample[i];
                i = (i+1)%type_mult;
                return ret;
            }
        );
    }
}
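/* For example (assuming the float output path chosen later in
 * AudioState::handler), a 5.1 float stream has a frame size of
 * 6 channels * 4 bytes = 24 bytes; 24 is a multiple of 8, so readAudio()
 * dispatches to sample_dup<uint64_t> and each duplicated frame is copied as
 * three 8-byte words. */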
int AudioState::readAudio(uint8_t *samples, int length)
{
    int sample_skip = getSync();
    int audio_size = 0;

    /* Read the next chunk of data, refill the buffer, and queue it
     * on the source */
    length /= mFrameSize;
    while(audio_size < length)
    {
        if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
        {
            int frame_len = decodeFrame();
            if(frame_len <= 0) break;

            mSamplesLen = frame_len;
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            sample_skip -= mSamplesPos;

            mCurrentPts += (double)mSamplesPos / (double)mCodecCtx->sample_rate;
        }

        int rem = length - audio_size;
        if(mSamplesPos >= 0)
        {
            int len = mSamplesLen - mSamplesPos;
            if(rem > len) rem = len;
            memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
        }
        else
        {
            rem = std::min(rem, -mSamplesPos);

            /* Add samples by copying the first sample */
            if((mFrameSize&7) == 0)
                sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&3) == 0)
                sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&1) == 0)
                sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
            else
                sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
        }

        mSamplesPos += rem;
        mCurrentPts += (double)rem / mCodecCtx->sample_rate;
        samples += rem*mFrameSize;
        audio_size += rem;
    }

    if(audio_size < length && audio_size > 0)
    {
        int rem = length - audio_size;
        std::fill_n(samples, rem*mFrameSize,
                    (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
        mCurrentPts += (double)rem / mCodecCtx->sample_rate;
        audio_size += rem;
    }

    return audio_size * mFrameSize;
}
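/* getSync() drives the mSamplesPos bookkeeping above: a positive correction
 * makes mSamplesPos start past the beginning of the freshly decoded block
 * (dropping frames), while a negative correction leaves it below zero so the
 * first sample gets duplicated until the position catches back up to zero. */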
int AudioState::handler()
{
    std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
    ALenum fmt;

    /* Find a suitable format for Alure. */
    if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
    {
        mDstSampleFmt = AV_SAMPLE_FMT_U8;

        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
            mDstChanLayout = mCodecCtx->channel_layout;
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
            mDstChanLayout = mCodecCtx->channel_layout;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFormat = AL_FORMAT_MONO8;
        }
        if(!mDstChanLayout)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFormat = AL_FORMAT_STEREO8;
        }
    }
    else if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
            alIsExtensionPresent("AL_EXT_FLOAT32"))
    {
        mDstSampleFmt = AV_SAMPLE_FMT_FLT;

        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
            mDstChanLayout = mCodecCtx->channel_layout;
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
            mDstChanLayout = mCodecCtx->channel_layout;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFormat = AL_FORMAT_MONO_FLOAT32;
        }
        if(!mDstChanLayout)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFormat = AL_FORMAT_STEREO_FLOAT32;
        }
    }
    else
    {
        mDstSampleFmt = AV_SAMPLE_FMT_S16;

        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
            mDstChanLayout = mCodecCtx->channel_layout;
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
            mDstChanLayout = mCodecCtx->channel_layout;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFormat = AL_FORMAT_MONO16;
        }
        if(!mDstChanLayout)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFormat = AL_FORMAT_STEREO16;
        }
    }
    ALsizei buffer_len = mCodecCtx->sample_rate * AUDIO_BUFFER_TIME / 1000 *
                         mFrameSize;
    void *samples = av_malloc(buffer_len);

    if(!(mDecodedFrame=av_frame_alloc()))
        std::cerr<< "Failed to allocate audio frame" <<std::endl;

    mSwresCtx = swr_alloc_set_opts(nullptr,
        mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
        mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
            (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
        mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
        0, nullptr
    );
    if(!mSwresCtx || swr_init(mSwresCtx) != 0)
        std::cerr<< "Failed to initialize audio converter" <<std::endl;
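    /* The swresample context set up above converts whatever the decoder
     * produces (mCodecCtx->sample_fmt, possibly planar, in the stream's
     * channel layout) into the interleaved mDstSampleFmt/mDstChanLayout
     * format chosen for OpenAL, at the same sample rate. */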
    alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);
    alGenSources(1, &mSource);

    if(do_direct_out)
    {
        if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
            std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
        else
        {
            alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
            std::cout<< "Direct out enabled" <<std::endl;
        }
    }
    while(alGetError() == AL_NO_ERROR && !mMovie->mQuit.load())
    {
        /* First remove any processed buffers. */
        ALint processed;
        alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
        if(processed > 0)
        {
            std::array<ALuint,AUDIO_BUFFER_QUEUE_SIZE> tmp;
            alSourceUnqueueBuffers(mSource, processed, tmp.data());
        }

        /* Refill the buffer queue. */
        ALint queued;
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        while(queued < AUDIO_BUFFER_QUEUE_SIZE)
        {
            int audio_size;

            /* Read the next chunk of data, fill the buffer, and queue it on
             * the source */
            audio_size = readAudio(reinterpret_cast<uint8_t*>(samples), buffer_len);
            if(audio_size <= 0) break;

            ALuint bufid = mBuffers[mBufferIdx++];
            mBufferIdx %= AUDIO_BUFFER_QUEUE_SIZE;

            alBufferData(bufid, mFormat, samples, audio_size, mCodecCtx->sample_rate);
            alSourceQueueBuffers(mSource, 1, &bufid);
            queued++;
        }

        /* Check that the source is playing. */
        ALint state;
        alGetSourcei(mSource, AL_SOURCE_STATE, &state);
        if(state == AL_STOPPED)
        {
            /* AL_STOPPED means there was an underrun. Rewind the source to get
             * it back into an AL_INITIAL state.
             */
            alSourceRewind(mSource);
            continue;
        }

        /* (re)start the source if needed, and wait for a buffer to finish */
        if(state != AL_PLAYING && state != AL_PAUSED)
            alSourcePlay(mSource);
        SDL_Delay(AUDIO_BUFFER_TIME / 3);
    }

    alSourceRewind(mSource);
    alSourcei(mSource, AL_BUFFER, 0);

    av_frame_free(&mDecodedFrame);
    swr_free(&mSwresCtx);
    av_freep(&samples);

    return 0;
}
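/* The loop above polls roughly three times per buffer period
 * (SDL_Delay(AUDIO_BUFFER_TIME / 3) is about 33ms for 100ms buffers), which
 * keeps the AUDIO_BUFFER_QUEUE_SIZE queue topped up without busy-waiting. */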
double VideoState::getClock()
{
    double delta = (av_gettime() - mCurrentPtsTime) / 1000000.0;
    return mCurrentPts + delta;
}
Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
{
    SDL_Event evt{};
    evt.user.type = FF_REFRESH_EVENT;
    evt.user.data1 = opaque;
    SDL_PushEvent(&evt);
    return 0; /* 0 means stop timer */
}
/* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
void VideoState::schedRefresh(int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, this);
}
/* Called by VideoState::refreshTimer to display the next video frame. */
void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
{
    Picture *vp = &mPictQ[mPictQRead];

    float aspect_ratio;
    int win_w, win_h;
    int w, h, x, y;

    if(mCodecCtx->sample_aspect_ratio.num == 0)
        aspect_ratio = 0.0f;
    else
    {
        aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
                       mCodecCtx->height;
    }
    if(aspect_ratio <= 0.0f)
        aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;

    SDL_GetWindowSize(screen, &win_w, &win_h);
    h = win_h;
    w = ((int)rint(h * aspect_ratio) + 3) & ~3;
    if(w > win_w)
    {
        w = win_w;
        h = ((int)rint(w / aspect_ratio) + 3) & ~3;
    }
    x = (win_w - w) / 2;
    y = (win_h - h) / 2;

    SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
    SDL_Rect dst_rect{ x, y, w, h };
    SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
    SDL_RenderPresent(renderer);
}
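/* The "+ 3) & ~3" above rounds the scaled dimension up to a multiple of 4
 * (e.g. 479 becomes 480), a common alignment choice when displaying 4:2:0
 * subsampled video. */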
/* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
 * was created. It handles the display of the next decoded video frame (if not
 * falling behind), and sets up the timer for the following video frame.
 */
void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
{
    if(mEOS && mPictQSize == 0)
    {
        /* Nothing more to show: flag the final update and wake up anything
         * waiting on the picture queue. */
        mFinalUpdate = true;
        std::unique_lock<std::mutex>(mPictQMutex).unlock();
        mPictQCond.notify_all();
        return;
    }

    std::unique_lock<std::mutex> lock(mPictQMutex);
    if(mPictQSize == 0)
    {
        /* No picture ready yet. */
        lock.unlock();
        mPictQCond.notify_all();
        return;
    }
    Picture *vp = &mPictQ[mPictQRead];
    mCurrentPts = vp->mPts;
    mCurrentPtsTime = av_gettime();

    /* Get delay using the frame pts and the pts from last frame. */
    double delay = vp->mPts - mFrameLastPts;
    if(delay <= 0 || delay >= 1.0)
    {
        /* If incorrect delay, use previous one. */
        delay = mFrameLastDelay;
    }
    /* Save for next frame. */
    mFrameLastDelay = delay;
    mFrameLastPts = vp->mPts;

    /* Update delay to sync to clock if not master source. */
    if(mMovie->mAVSyncType != AV_SYNC_VIDEO_MASTER)
    {
        double ref_clock = mMovie->getMasterClock();
        double diff = vp->mPts - ref_clock;

        /* Skip or repeat the frame. Take delay into account. */
        double sync_threshold = std::min(delay, AV_SYNC_THRESHOLD);
        if(fabs(diff) < AV_NOSYNC_THRESHOLD)
        {
            if(diff <= -sync_threshold)
                delay = 0;
            else if(diff >= sync_threshold)
                delay *= 2.0;
        }
    }

    mFrameTimer += delay;
    /* Compute the REAL delay. */
    double actual_delay = mFrameTimer - (av_gettime() / 1000000.0);
    if(!(actual_delay >= 0.010))
    {
        /* We don't have time to handle this picture, just skip to the next one. */
        mPictQRead = (mPictQRead+1)%mPictQ.size();
        mPictQSize--;
    }
    schedRefresh((int)(actual_delay*1000.0 + 0.5));

    /* Show the picture! */
    display(screen, renderer);

    /* Update queue for next picture. */
    mPictQRead = (mPictQRead+1)%mPictQ.size();
    mPictQSize--;
    lock.unlock();
    mPictQCond.notify_all();
}
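/* Worked example of the sync check above (illustrative numbers): with a
 * nominal 40ms frame delay, sync_threshold = min(0.04, AV_SYNC_THRESHOLD) =
 * 0.01. If the frame's pts is 60ms behind the master clock, diff = -0.06 <=
 * -0.01, so delay collapses to 0 and the frame is shown immediately; if it is
 * 60ms ahead, the delay doubles to 0.08s to let the clock catch up. */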
/* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
 * main thread where the renderer was created.
 */
void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
{
    Picture *vp = &mPictQ[mPictQWrite];
    bool fmt_updated = false;

    /* allocate or resize the buffer! */
    if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
    {
        fmt_updated = true;
        if(vp->mImage)
            SDL_DestroyTexture(vp->mImage);
        vp->mImage = SDL_CreateTexture(
            renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
            mCodecCtx->coded_width, mCodecCtx->coded_height
        );
        if(!vp->mImage)
            std::cerr<< "Failed to create YV12 texture!" <<std::endl;
        vp->mWidth = mCodecCtx->width;
        vp->mHeight = mCodecCtx->height;

        if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
        {
            /* For the first update, set the window size to the video size. */
            mFirstUpdate = false;

            int w = vp->mWidth;
            int h = vp->mHeight;
            if(mCodecCtx->sample_aspect_ratio.den != 0)
            {
                double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    w = (int)(w*aspect_ratio + 0.5);
                else if(aspect_ratio > 0.0)
                    h = (int)(h/aspect_ratio + 0.5);
            }
            SDL_SetWindowSize(screen, w, h);
        }
    }

    if(vp->mImage)
    {
        AVFrame *frame = mDecodedFrame;
        void *pixels = nullptr;
        int pitch = 0;

        if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
            SDL_UpdateYUVTexture(vp->mImage, nullptr,
                frame->data[0], frame->linesize[0],
                frame->data[1], frame->linesize[1],
                frame->data[2], frame->linesize[2]
            );
        else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
            std::cerr<< "Failed to lock texture" <<std::endl;
        else
        {
            // Convert the image into YUV format that SDL uses
            int coded_w = mCodecCtx->coded_width;
            int coded_h = mCodecCtx->coded_height;
            int w = mCodecCtx->width;
            int h = mCodecCtx->height;
            if(!mSwscaleCtx || fmt_updated)
            {
                sws_freeContext(mSwscaleCtx);
                mSwscaleCtx = sws_getContext(
                    w, h, mCodecCtx->pix_fmt,
                    w, h, AV_PIX_FMT_YUV420P, 0,
                    nullptr, nullptr, nullptr
                );
            }

            /* point pict at the queue */
            uint8_t *pict_data[3];
            pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
            pict_data[1] = pict_data[0] + coded_w*coded_h;
            pict_data[2] = pict_data[1] + coded_w*coded_h/4;

            int pict_linesize[3];
            pict_linesize[0] = pitch;
            pict_linesize[1] = pitch / 2;
            pict_linesize[2] = pitch / 2;
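            /* The texture holds planar 4:2:0 data: a full-size Y plane
             * followed by quarter-size U and V planes, so the chroma planes
             * start at coded_w*coded_h and coded_w*coded_h*5/4 bytes in, and
             * their row pitch is half the luma pitch. */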
            sws_scale(mSwscaleCtx, (const uint8_t**)frame->data,
                      frame->linesize, 0, h, pict_data, pict_linesize);
            SDL_UnlockTexture(vp->mImage);
        }
    }

    std::unique_lock<std::mutex> lock(mPictQMutex);
    vp->mUpdated = true;
    lock.unlock();
    mPictQCond.notify_one();
}
int VideoState::queuePicture(double pts)
{
    /* Wait until we have space for a new pic */
    std::unique_lock<std::mutex> lock(mPictQMutex);
    while(mPictQSize >= mPictQ.size() && !mMovie->mQuit.load())
        mPictQCond.wait(lock);

    if(mMovie->mQuit.load())
        return -1;

    Picture *vp = &mPictQ[mPictQWrite];

    /* We have to create/update the picture in the main thread */
    vp->mUpdated = false;
    SDL_Event evt{};
    evt.user.type = FF_UPDATE_EVENT;
    evt.user.data1 = this;
    SDL_PushEvent(&evt);

    /* Wait until the picture is updated. */
    while(!vp->mUpdated && !mMovie->mQuit.load())
        mPictQCond.wait(lock);
    if(mMovie->mQuit.load())
        return -1;
    vp->mPts = pts;

    mPictQWrite = (mPictQWrite+1)%mPictQ.size();
    mPictQSize++;

    return 0;
}
double VideoState::synchronize(double pts)
{
    double frame_delay;

    if(pts == 0.0) /* if we aren't given a pts, set it to the clock */
        pts = mClock;
    else /* if we have pts, set video clock to it */
        mClock = pts;

    /* update the video clock */
    frame_delay = av_q2d(mCodecCtx->time_base);
    /* if we are repeating a frame, adjust clock accordingly */
    frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
    mClock += frame_delay;
    return pts;
}
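/* For a codec time_base of 1/25, frame_delay starts at 0.04s; a frame with
 * repeat_pict = 1 (e.g. soft telecine) adds half a frame time, giving
 * 0.04 + 1*0.02 = 0.06s added to the video clock (illustrative numbers). */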
int VideoState::handler()
{
    mDecodedFrame = av_frame_alloc();
    while(!mMovie->mQuit)
    {
        AVPacket packet{};
        while(!mMovie->mQuit)
        {
            if(mQueue.peek(&packet, mMovie->mQuit) <= 0)
                goto finish;

            int ret = avcodec_send_packet(mCodecCtx, &packet);
            if(ret != AVERROR(EAGAIN))
            {
                if(ret < 0)
                    std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
                mQueue.pop();
            }
            av_packet_unref(&packet);
            if(ret == 0 || ret == AVERROR(EAGAIN))
                break;
        }

        /* Decode video frame */
        int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
        if(ret == AVERROR(EAGAIN))
            continue;
        if(ret < 0)
        {
            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
            break;
        }

        double pts = synchronize(
            av_q2d(mStream->time_base) * av_frame_get_best_effort_timestamp(mDecodedFrame)
        );
        if(queuePicture(pts) < 0)
            break;
        av_frame_unref(mDecodedFrame);
    }
finish:
    mEOS = true;
    av_frame_free(&mDecodedFrame);

    std::unique_lock<std::mutex> lock(mPictQMutex);
    while(!mFinalUpdate)
        mPictQCond.wait(lock);

    return 0;
}
int MovieState::decode_interrupt_cb(void *ctx)
{
    return reinterpret_cast<MovieState*>(ctx)->mQuit;
}
bool MovieState::prepare()
{
    mFormatCtx = avformat_alloc_context();
    mFormatCtx->interrupt_callback.callback = decode_interrupt_cb;
    mFormatCtx->interrupt_callback.opaque = this;
    if(avio_open2(&mFormatCtx->pb, mFilename.c_str(), AVIO_FLAG_READ,
                  &mFormatCtx->interrupt_callback, nullptr))
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }

    /* Open movie file */
    if(avformat_open_input(&mFormatCtx, mFilename.c_str(), nullptr, nullptr) != 0)
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }

    /* Retrieve stream information */
    if(avformat_find_stream_info(mFormatCtx, nullptr) < 0)
    {
        std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
        return false;
    }

    mVideo.schedRefresh(40);

    mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
    return true;
}
void MovieState::setTitle(SDL_Window *window)
{
    auto pos1 = mFilename.rfind('/');
    auto pos2 = mFilename.rfind('\\');
    auto fpos = ((pos1 == std::string::npos) ? pos2 :
                 (pos2 == std::string::npos) ? pos1 :
                 std::max(pos1, pos2)) + 1;
    SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
}
double MovieState::getClock()
{
    return (av_gettime()-mExternalClockBase) / 1000000.0;
}

double MovieState::getMasterClock()
{
    if(mAVSyncType == AV_SYNC_VIDEO_MASTER)
        return mVideo.getClock();
    if(mAVSyncType == AV_SYNC_AUDIO_MASTER)
        return mAudio.getClock();
    return getClock();
}
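/* With DEFAULT_AV_SYNC_TYPE set to AV_SYNC_EXTERNAL_MASTER above, both the
 * audio and video streams end up syncing against this external wall-clock
 * timer (mExternalClockBase), which parse_handler() starts slightly in the
 * future so playback can spin up without an initial skip. */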
int MovieState::streamComponentOpen(int stream_index)
{
    if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
        return -1;

    /* Get a pointer to the codec context for the stream, and open the
     * associated codec */
    AVCodecContext *avctx = avcodec_alloc_context3(nullptr);
    if(!avctx) return -1;

    if(avcodec_parameters_to_context(avctx, mFormatCtx->streams[stream_index]->codecpar))
    {
        avcodec_free_context(&avctx);
        return -1;
    }

    AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
    if(!codec || avcodec_open2(avctx, codec, nullptr) < 0)
    {
        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
                 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        avcodec_free_context(&avctx);
        return -1;
    }

    /* Initialize and start the media type handler */
    switch(avctx->codec_type)
    {
        case AVMEDIA_TYPE_AUDIO:
            mAudioStream = stream_index;
            mAudio.mStream = mFormatCtx->streams[stream_index];
            mAudio.mCodecCtx = avctx;

            /* Averaging filter for audio sync */
            mAudio.mDiff.AvgCoeff = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
            /* Correct audio only if larger error than this */
            mAudio.mDiff.Threshold = 0.050/* 50 ms */;

            mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
            break;

        case AVMEDIA_TYPE_VIDEO:
            mVideoStream = stream_index;
            mVideo.mStream = mFormatCtx->streams[stream_index];
            mVideo.mCodecCtx = avctx;

            mVideo.mCurrentPtsTime = av_gettime();
            mVideo.mFrameTimer = (double)mVideo.mCurrentPtsTime / 1000000.0;
            mVideo.mFrameLastDelay = 40e-3;

            mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
            break;

        default:
            avcodec_free_context(&avctx);
            return -1;
    }

    return stream_index;
}
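/* The averaging coefficient above works out to exp(log(0.01)/20) ~= 0.794,
 * i.e. an exponential moving average where a measurement's weight decays to
 * about 1% after AUDIO_DIFF_AVG_NB (20) updates. */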
int MovieState::parse_handler()
{
    int video_index = -1;
    int audio_index = -1;

    /* Dump information about file onto standard error */
    av_dump_format(mFormatCtx, 0, mFilename.c_str(), 0);

    /* Find the first video and audio streams */
    for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
    {
        if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
            video_index = i;
        else if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = i;
    }

    /* Start the external clock in 50ms, to give the audio and video
     * components time to start without needing to skip ahead.
     */
    mExternalClockBase = av_gettime() + 50000;
    if(audio_index >= 0)
        streamComponentOpen(audio_index);
    if(video_index >= 0)
        streamComponentOpen(video_index);

    if(mVideoStream < 0 && mAudioStream < 0)
    {
        std::cerr<< mFilename<<": could not open codecs" <<std::endl;
        mQuit = true;
    }

    /* Main packet handling loop */
    while(!mQuit.load())
    {
        if(mAudio.mQueue.mTotalSize + mVideo.mQueue.mTotalSize >= MAX_QUEUE_SIZE)
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            continue;
        }

        AVPacket packet;
        if(av_read_frame(mFormatCtx, &packet) < 0)
            break;

        /* Copy the packet in the queue it's meant for. */
        if(packet.stream_index == mVideoStream)
            mVideo.mQueue.put(&packet);
        else if(packet.stream_index == mAudioStream)
            mAudio.mQueue.put(&packet);
        av_packet_unref(&packet);
    }
    mVideo.mQueue.finish();
    mAudio.mQueue.finish();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);

    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}
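/* Note the simple back-pressure above: once the two packet queues hold
 * MAX_QUEUE_SIZE (15MB) of compressed data combined, the demuxer sleeps in
 * 10ms steps instead of reading further ahead of the decoders. */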
int main(int argc, char *argv[])
{
    std::unique_ptr<MovieState> movState;

    if(argc < 2)
    {
        std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
        return 1;
    }
    /* Register all formats and codecs */
    av_register_all();
    /* Initialize networking protocols */
    avformat_network_init();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
    {
        std::cerr<< "Could not initialize SDL - "<<SDL_GetError() <<std::endl;
        return 1;
    }

    /* Make a window to put our video */
    SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
    if(!screen)
    {
        std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
        return 1;
    }
    /* Make a renderer to handle the texture image surface and rendering. */
    SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
    if(renderer)
    {
        SDL_RendererInfo rinf{};
        bool ok = false;

        /* Make sure the renderer supports IYUV textures. If not, fallback to a
         * software renderer. */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)
        {
            for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
                ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
        }
        if(!ok)
        {
            std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
            SDL_DestroyRenderer(renderer);
            renderer = nullptr;
        }
    }
    if(!renderer)
        renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);
    if(!renderer)
    {
        std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
        return 1;
    }
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderFillRect(renderer, nullptr);
    SDL_RenderPresent(renderer);
    /* Open an audio device */
    int fileidx = 1;
    ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*
    {
        ALCdevice *dev = NULL;
        if(argc > 3 && strcmp(argv[1], "-device") == 0)
        {
            fileidx = 3;
            dev = alcOpenDevice(argv[2]);
            if(dev) return dev;
            std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;
        }
        return alcOpenDevice(nullptr);
    }();
    ALCcontext *context = alcCreateContext(device, nullptr);
    if(!context || alcMakeContextCurrent(context) == ALC_FALSE)
    {
        std::cerr<< "Failed to set up audio device" <<std::endl;
        alcDestroyContext(context);
        return 1;
    }

    const ALCchar *name = nullptr;
    if(alcIsExtensionPresent(device, "ALC_ENUMERATE_ALL_EXT"))
        name = alcGetString(device, ALC_ALL_DEVICES_SPECIFIER);
    if(!name || alcGetError(device) != AL_NO_ERROR)
        name = alcGetString(device, ALC_DEVICE_SPECIFIER);
    std::cout<< "Opened \""<<name<<"\"" <<std::endl;
    if(fileidx < argc && strcmp(argv[fileidx], "-direct") == 0)
    {
        fileidx++;
        do_direct_out = true;
    }

    while(fileidx < argc && !movState)
    {
        movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
        if(!movState->prepare()) movState = nullptr;
    }
    if(!movState)
    {
        std::cerr<< "Could not start a video" <<std::endl;
        return 1;
    }
    movState->setTitle(screen);

    /* Default to going to the next movie at the end of one. */
    enum class EomAction {
        Next, Quit
    } eom_action = EomAction::Next;
    SDL_Event event;
    while(SDL_WaitEvent(&event) == 1)
    {
        switch(event.type)
        {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym)
            {
                movState->mQuit = true;
                eom_action = EomAction::Quit;

                movState->mQuit = true;
                eom_action = EomAction::Next;
            }
            break;

        case SDL_WINDOWEVENT:
            switch(event.window.event)
            {
            case SDL_WINDOWEVENT_RESIZED:
                SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                SDL_RenderFillRect(renderer, nullptr);
                break;
            }
            break;

        case SDL_QUIT:
            movState->mQuit = true;
            eom_action = EomAction::Quit;
            break;

        case FF_UPDATE_EVENT:
            reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
                screen, renderer
            );
            break;

        case FF_REFRESH_EVENT:
            reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
                screen, renderer
            );
            break;

        case FF_MOVIE_DONE_EVENT:
            if(eom_action != EomAction::Quit)
            {
                movState = nullptr;
                while(fileidx < argc && !movState)
                {
                    movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
                    if(!movState->prepare()) movState = nullptr;
                }
                if(movState)
                {
                    movState->setTitle(screen);
                    break;
                }
            }

            /* Nothing more to play. Shut everything down and quit. */
            movState = nullptr;

            alcMakeContextCurrent(nullptr);
            alcDestroyContext(context);
            alcCloseDevice(device);

            SDL_DestroyRenderer(renderer);
            SDL_DestroyWindow(screen);

            return 0;
        }
    }

    std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
    return 1;
}