/*
 * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
 * Copyright 2014, Colin Günther <coling@gmx.de>
 * All rights reserved. Distributed under the terms of the GNU L-GPL license.
 */
#include "AVFormatReader.h"

#include <AutoDeleter.h>
#include <ByteOrder.h>
#include <MediaDefs.h>
#include <MediaFormats.h>

#include "DemuxerTable.h"
#include "Utilities.h"
//#define TRACE_AVFORMAT_READER
#ifdef TRACE_AVFORMAT_READER
#	define TRACE printf
#	define TRACE_IO(a...)
#	define TRACE_SEEK(a...) printf(a)
#	define TRACE_FIND(a...)
#	define TRACE_PACKET(a...)
#else
#	define TRACE(a...)
#	define TRACE_IO(a...)
#	define TRACE_SEEK(a...)
#	define TRACE_FIND(a...)
#	define TRACE_PACKET(a...)
#endif

#define ERROR(a...) fprintf(stderr, a)
#if LIBAVCODEC_VERSION_INT < ((54 << 16) | (50 << 8))
#define AV_CODEC_ID_PCM_S16BE CODEC_ID_PCM_S16BE
#define AV_CODEC_ID_PCM_S16LE CODEC_ID_PCM_S16LE
#define AV_CODEC_ID_PCM_U16BE CODEC_ID_PCM_U16BE
#define AV_CODEC_ID_PCM_U16LE CODEC_ID_PCM_U16LE
#define AV_CODEC_ID_PCM_S8 CODEC_ID_PCM_S8
#define AV_CODEC_ID_PCM_U8 CODEC_ID_PCM_U8
#endif
static const int64 kNoPTSValue = AV_NOPTS_VALUE;
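
// Maps an FFmpeg sample format to a Media Kit byte-order constant. The
// mapping below simply reports host endianness for every format.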
static uint32
avformat_to_beos_byte_order(AVSampleFormat format)
{
	return B_MEDIA_HOST_ENDIAN;
}
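
// Copies all entries of an AVDictionary into a BMessage, translating the
// well-known ID3v2 frame IDs (TALB, TIT2, ...) to friendlier attribute names
// and passing any other entry through under its original key.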
static void
avdictionary_to_message(AVDictionary* dictionary, BMessage* message)
{
	if (dictionary == NULL)
		return;

	AVDictionaryEntry* entry = NULL;
	while ((entry = av_dict_get(dictionary, "", entry,
			AV_DICT_IGNORE_SUFFIX))) {
		// Convert entry keys into something more meaningful, using the
		// ID3v2 frame names.
		if (strcmp(entry->key, "TALB") == 0 || strcmp(entry->key, "TAL") == 0)
			message->AddString("album", entry->value);
		else if (strcmp(entry->key, "TCOM") == 0)
			message->AddString("composer", entry->value);
		else if (strcmp(entry->key, "TCON") == 0 || strcmp(entry->key, "TCO") == 0)
			message->AddString("genre", entry->value);
		else if (strcmp(entry->key, "TCOP") == 0)
			message->AddString("copyright", entry->value);
		else if (strcmp(entry->key, "TDRL") == 0 || strcmp(entry->key, "TDRC") == 0)
			message->AddString("date", entry->value);
		else if (strcmp(entry->key, "TENC") == 0 || strcmp(entry->key, "TEN") == 0)
			message->AddString("encoded_by", entry->value);
		else if (strcmp(entry->key, "TIT2") == 0 || strcmp(entry->key, "TT2") == 0)
			message->AddString("title", entry->value);
		else if (strcmp(entry->key, "TLAN") == 0)
			message->AddString("language", entry->value);
		else if (strcmp(entry->key, "TPE1") == 0 || strcmp(entry->key, "TP1") == 0)
			message->AddString("artist", entry->value);
		else if (strcmp(entry->key, "TPE2") == 0 || strcmp(entry->key, "TP2") == 0)
			message->AddString("album_artist", entry->value);
		else if (strcmp(entry->key, "TPE3") == 0 || strcmp(entry->key, "TP3") == 0)
			message->AddString("performer", entry->value);
		else if (strcmp(entry->key, "TPOS") == 0)
			message->AddString("disc", entry->value);
		else if (strcmp(entry->key, "TPUB") == 0)
			message->AddString("publisher", entry->value);
		else if (strcmp(entry->key, "TRCK") == 0 || strcmp(entry->key, "TRK") == 0)
			message->AddString("track", entry->value);
		else if (strcmp(entry->key, "TSOA") == 0)
			message->AddString("album-sort", entry->value);
		else if (strcmp(entry->key, "TSOP") == 0)
			message->AddString("artist-sort", entry->value);
		else if (strcmp(entry->key, "TSOT") == 0)
			message->AddString("title-sort", entry->value);
		else if (strcmp(entry->key, "TSSE") == 0)
			message->AddString("encoder", entry->value);
		else if (strcmp(entry->key, "TYER") == 0)
			message->AddString("year", entry->value);
		else
			message->AddString(entry->key, entry->value);
	}
}
// #pragma mark - StreamBase
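
// StreamBase wraps one AVStream of a shared AVFormatContext. It owns the
// libavformat I/O glue (the _Read()/_Seek() hooks on top of a BMediaIO),
// packet reading, seeking, and the conversion between microseconds and the
// stream's time base.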
class StreamBase {
public:
								StreamBase(BMediaIO* source,
									BLocker* sourceLock, BLocker* streamLock);
	virtual						~StreamBase();

	// Init an individual AVFormatContext
			status_t			Open();

	// Setup this stream to point to the AVStream at the given streamIndex.
	virtual	status_t			Init(int32 streamIndex);

	inline	const AVFormatContext* Context() const
									{ return fContext; }
			int32				Index() const;
			int32				CountStreams() const;
			int32				StreamIndexFor(int32 virtualIndex) const;
	inline	int32				VirtualIndex() const
									{ return fVirtualIndex; }

			double				FrameRate() const;
			bigtime_t			Duration() const;

	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

			status_t			GetNextChunk(const void** chunkBuffer,
									size_t* chunkSize,
									media_header* mediaHeader);

protected:
	// I/O hooks for libavformat, cookie will be a Stream instance.
	// Since multiple StreamCookies use the same BMediaIO source, they
	// maintain the position individually, and may need to seek the source
	// if it does not match anymore in _Read().
	static	int					_Read(void* cookie, uint8* buffer,
									int bufferSize);
	static	off_t				_Seek(void* cookie, off_t offset, int whence);

			status_t			_NextPacket(bool reuse);

			int64_t				_ConvertToStreamTimeBase(bigtime_t time) const;
			bigtime_t			_ConvertFromStreamTimeBase(int64_t time) const;

protected:
			BMediaIO*			fSource;
			off_t				fPosition;
			// Since different threads may read from the source,
			// we need to protect the file position and I/O by a lock.
			BLocker*			fSourceLock;

			BLocker*			fStreamLock;

			AVFormatContext*	fContext;
			AVStream*			fStream;
			int32				fVirtualIndex;

			media_format		fFormat;

			AVIOContext*		fIOContext;

			AVPacket			fPacket;
			bool				fReusePacket;

			bool				fSeekByBytes;
			bool				fStreamBuildsIndexWhileReading;
};
StreamBase::StreamBase(BMediaIO* source, BLocker* sourceLock,
		BLocker* streamLock)
	:
	fSource(source),
	fPosition(0),
	fSourceLock(sourceLock),
	fStreamLock(streamLock),
	fContext(NULL),
	fStream(NULL),
	fVirtualIndex(-1),
	fIOContext(NULL),
	fReusePacket(false),
	fSeekByBytes(false),
	fStreamBuildsIndexWhileReading(false)
{
	// NOTE: Don't use streamLock here, it may not yet be initialized!

	av_new_packet(&fPacket, 0);
	memset(&fFormat, 0, sizeof(media_format));
}
StreamBase::~StreamBase()
{
	if (fContext != NULL)
		avformat_close_input(&fContext);

	av_free_packet(&fPacket);

	if (fIOContext != NULL)
		av_free(fIOContext->buffer);
	av_free(fIOContext);
}
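
// Open() wires the BMediaIO source into libavformat: it first sniffs the
// first 512 bytes through the MIME database to obtain a file extension hint,
// then allocates an AVIOContext using the _Read()/_Seek() hooks, opens the
// input and retrieves the stream information.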
status_t
StreamBase::Open()
{
	BAutolock _(fStreamLock);

	size_t bufferSize = 32768;
	uint8* buffer = static_cast<uint8*>(av_malloc(bufferSize));
	if (buffer == NULL)
		return B_NO_MEMORY;

	// First try to identify the file using the MIME database, as ffmpeg
	// (especially old versions) is not very good at this and relies on us
	// to give it the file extension as a hint.
	// For this we need some valid data in the buffer, the first 512 bytes
	// should do because our MIME sniffing never uses more.
	const char* extension = NULL;
	BMessage message;
	if (fSource->Read(buffer, 512) == 512) {
		BMimeType type;
		if (BMimeType::GuessMimeType(buffer, 512, &type) == B_OK) {
			if (type.GetFileExtensions(&message) == B_OK)
				extension = message.FindString("extensions");
		}
	}

	// If the format is not identified, try Amiga MOD-files, because these do
	// not currently have a sniffing rule.
	if (extension == NULL)
		extension = ".mod";

	// Allocate I/O context with buffer and hook functions, pass ourself as
	// cookie.
	memset(buffer, 0, bufferSize);
	fIOContext = avio_alloc_context(buffer, bufferSize, 0, this, _Read, 0,
		_Seek);
	if (fIOContext == NULL) {
		TRACE("StreamBase::Open() - avio_alloc_context() failed!\n");
		av_free(buffer);
		return B_NO_MEMORY;
	}

	fContext = avformat_alloc_context();
	fContext->pb = fIOContext;

	// Allocate our context and probe the input format
	if (avformat_open_input(&fContext, extension, NULL, NULL) < 0) {
		TRACE("StreamBase::Open() - avformat_open_input() failed!\n");
		// avformat_open_input() frees the context in case of failure
		fContext = NULL;
		return B_NOT_SUPPORTED;
	}

	TRACE("StreamBase::Open() - "
		"avformat_open_input(): %s\n", fContext->iformat->name);
	TRACE("  flags:%s%s%s%s%s\n",
		(fContext->iformat->flags & AVFMT_GLOBALHEADER) ? " AVFMT_GLOBALHEADER" : "",
		(fContext->iformat->flags & AVFMT_NOTIMESTAMPS) ? " AVFMT_NOTIMESTAMPS" : "",
		(fContext->iformat->flags & AVFMT_GENERIC_INDEX) ? " AVFMT_GENERIC_INDEX" : "",
		(fContext->iformat->flags & AVFMT_TS_DISCONT) ? " AVFMT_TS_DISCONT" : "",
		(fContext->iformat->flags & AVFMT_VARIABLE_FPS) ? " AVFMT_VARIABLE_FPS" : ""
	);

	// Retrieve stream information
	if (avformat_find_stream_info(fContext, NULL) < 0) {
		TRACE("StreamBase::Open() - avformat_find_stream_info() failed!\n");
		return B_NOT_SUPPORTED;
	}

	fSeekByBytes = (fContext->iformat->flags & AVFMT_TS_DISCONT) != 0;
	fStreamBuildsIndexWhileReading
		= (fContext->iformat->flags & AVFMT_GENERIC_INDEX) != 0
			|| fSeekByBytes;

	TRACE("StreamBase::Open() - "
		"av_find_stream_info() success! Seeking by bytes: %d\n",
		fSeekByBytes);

	return B_OK;
}
status_t
StreamBase::Init(int32 virtualIndex)
{
	BAutolock _(fStreamLock);

	TRACE("StreamBase::Init(%ld)\n", virtualIndex);

	if (fContext == NULL)
		return B_NO_INIT;

	int32 streamIndex = StreamIndexFor(virtualIndex);
	if (streamIndex < 0) {
		TRACE("  bad stream index!\n");
		return B_BAD_INDEX;
	}

	TRACE("  context stream index: %ld\n", streamIndex);

	// We need to remember the virtual index so that
	// AVFormatReader::FreeCookie() can clear the correct stream entry.
	fVirtualIndex = virtualIndex;

	// Make us point to the AVStream at streamIndex
	fStream = fContext->streams[streamIndex];

// NOTE: Discarding other streams works for most, but not all containers,
// for example it does not work for the ASF demuxer. Since I don't know what
// other demuxer it breaks, let's just keep reading packets for unwanted
// streams, it just makes the _GetNextPacket() function slightly less
// efficient.
//	// Discard all other streams
//	for (unsigned i = 0; i < fContext->nb_streams; i++) {
//		if (i != (unsigned)streamIndex)
//			fContext->streams[i]->discard = AVDISCARD_ALL;
//	}

	return B_OK;
}
int32
StreamBase::Index() const
{
	return fStream->index;
}
int32
StreamBase::CountStreams() const
{
	// Figure out the stream count. If the context has "AVPrograms", use
	// the first program (for now).
	// TODO: To support "programs" properly, the BMediaFile/Track API should
	// be extended accordingly. I guess programs are like TV channels in the
	// same satellite transport stream. Maybe call them "TrackGroups".
	if (fContext->nb_programs > 0) {
		// See libavformat/utils.c:dump_format()
		return fContext->programs[0]->nb_stream_indexes;
	}
	return fContext->nb_streams;
}
int32
StreamBase::StreamIndexFor(int32 virtualIndex) const
{
	// NOTE: See CountStreams()
	if (fContext->nb_programs > 0) {
		const AVProgram* program = fContext->programs[0];
		if (virtualIndex >= 0
			&& virtualIndex < (int32)program->nb_stream_indexes) {
			return program->stream_index[virtualIndex];
		}
	} else {
		if (virtualIndex >= 0 && virtualIndex < (int32)fContext->nb_streams)
			return virtualIndex;
	}
	return -1;
}
double
StreamBase::FrameRate() const
{
	// TODO: Find a way to always calculate a correct frame rate...
	double frameRate = 1.0;
	switch (fStream->codec->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			frameRate = (double)fStream->codec->sample_rate;
			break;
		case AVMEDIA_TYPE_VIDEO:
			if (fStream->avg_frame_rate.den && fStream->avg_frame_rate.num)
				frameRate = av_q2d(fStream->avg_frame_rate);
			else if (fStream->r_frame_rate.den && fStream->r_frame_rate.num)
				frameRate = av_q2d(fStream->r_frame_rate);
			else if (fStream->time_base.den && fStream->time_base.num)
				frameRate = 1 / av_q2d(fStream->time_base);
			else if (fStream->codec->time_base.den
				&& fStream->codec->time_base.num) {
				frameRate = 1 / av_q2d(fStream->codec->time_base);
			}

			// TODO: Fix up interlaced video for real
			if (frameRate == 50.0f)
				frameRate = 25.0f;
			break;
		default:
			break;
	}
	if (frameRate <= 0.0)
		frameRate = 1.0;
	return frameRate;
}
bigtime_t
StreamBase::Duration() const
{
	// TODO: This is not working correctly for all stream types...
	// It seems that the calculations here are correct, because they work
	// for a couple of streams and are in line with the documentation, but
	// unfortunately, libavformat itself seems to set the time_base and
	// duration wrongly sometimes. :-(
	if ((int64)fStream->duration != kNoPTSValue)
		return _ConvertFromStreamTimeBase(fStream->duration);
	else if ((int64)fContext->duration != kNoPTSValue)
		return (bigtime_t)fContext->duration;

	return 0;
}
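
// Seeking uses one of two strategies: for containers with discontinuous
// timestamps (fSeekByBytes) the target time is translated into an estimated
// byte position which is then refined by reading packets, otherwise
// avformat_seek_file() is used directly, with the index consulted first to
// predict the time that will actually be reached.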
status_t
StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(fStreamLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	TRACE_SEEK("StreamBase::Seek(%ld,%s%s%s%s, %lld, "
		"%lld)\n", VirtualIndex(),
		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
		*frame, *time);

	double frameRate = FrameRate();
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		// Seeking is always based on time, initialize it when client seeks
		// based on frame.
		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
	}

	int64_t timeStamp = *time;

	int searchFlags = AVSEEK_FLAG_BACKWARD;
	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
		searchFlags = 0;

	if (fSeekByBytes) {
		searchFlags |= AVSEEK_FLAG_BYTE;

		BAutolock _(fSourceLock);
		int64_t fileSize;

		if (fSource->GetSize(&fileSize) != B_OK)
			return B_NOT_SUPPORTED;

		int64_t duration = Duration();
		if (duration == 0)
			return B_NOT_SUPPORTED;

		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
			timeStamp -= 65536;
			if (timeStamp < 0)
				timeStamp = 0;
		}

		bool seekAgain = true;
		bool seekForward = true;
		bigtime_t lastFoundTime = -1;
		int64_t closestTimeStampBackwards = -1;
		while (seekAgain) {
			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
					INT64_MAX, searchFlags) < 0) {
				TRACE("  avformat_seek_file() (by bytes) failed.\n");
				return B_ERROR;
			}
			seekAgain = false;

			// Our last packet is toast in any case. Read the next one so we
			// know where we really seeked.
			fReusePacket = false;
			if (_NextPacket(true) == B_OK) {
				while (fPacket.pts == kNoPTSValue) {
					fReusePacket = false;
					if (_NextPacket(true) != B_OK)
						return B_ERROR;
				}
				if (fPacket.pos >= 0)
					timeStamp = fPacket.pos;
				bigtime_t foundTime
					= _ConvertFromStreamTimeBase(fPacket.pts);
				if (foundTime != lastFoundTime) {
					lastFoundTime = foundTime;
					if (foundTime > *time) {
						if (closestTimeStampBackwards >= 0) {
							timeStamp = closestTimeStampBackwards;
							seekAgain = true;
							seekForward = false;
							continue;
						}
						int64_t diff = int64_t(fileSize
							* ((double)(foundTime - *time) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp -= diff;
						TRACE_SEEK("  need to seek back (%lld) (time: %.2f "
							"-> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp < 0)
							foundTime = 0;
						else {
							seekAgain = true;
							continue;
						}
					} else if (seekForward && foundTime < *time - 100000) {
						closestTimeStampBackwards = timeStamp;
						int64_t diff = int64_t(fileSize
							* ((double)(*time - foundTime) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp += diff;
						TRACE_SEEK("  need to seek forward (%lld) (time: "
							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp > duration)
							foundTime = duration;
						else {
							seekAgain = true;
							continue;
						}
					}
				}
				TRACE_SEEK("  found time: %lld -> %lld (%.2f)\n", *time,
					foundTime, foundTime / 1000000.0);
				*time = foundTime;
				*frame = (uint64)(*time * frameRate / 1000000LL + 0.5);
				TRACE_SEEK("  seeked frame: %lld\n", *frame);
			} else {
				TRACE_SEEK("  _NextPacket() failed!\n");
				return B_ERROR;
			}
		}
	} else {
		// We may not get a PTS from the next packet after seeking, so
		// we try to get an expected time from the index.
		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
		int index = av_index_search_timestamp(fStream, streamTimeStamp,
			searchFlags);
		if (index < 0) {
			TRACE("  av_index_search_timestamp() failed\n");
		} else {
			const AVIndexEntry& entry = fStream->index_entries[index];
			streamTimeStamp = entry.timestamp;

			// Some demuxers use the first index entry to store some
			// other information, like the total playing time for example.
			// Assume the timeStamp of the first entry is always 0.
			// TODO: Handle start-time offset?
			if (index == 0)
				streamTimeStamp = 0;

			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
			bigtime_t timeDiff = foundTime > *time
				? foundTime - *time : *time - foundTime;

			if (timeDiff > 1000000
				&& (fStreamBuildsIndexWhileReading
					|| index == fStream->nb_index_entries - 1)) {
				// If the stream is building the index on the fly while parsing
				// it, we only have entries in the index for positions already
				// decoded, i.e. we cannot seek into the future. In that case,
				// just assume that we can seek where we want and leave
				// time/frame unmodified. Since successfully seeking one time
				// will generate index entries for the seeked to position, we
				// need to remember this in fStreamBuildsIndexWhileReading,
				// since when seeking back there will be later index entries,
				// but we still want to ignore the found entry.
				fStreamBuildsIndexWhileReading = true;
				TRACE_SEEK("  Not trusting generic index entry. "
					"(Current count: %d)\n", fStream->nb_index_entries);
			} else {
				// If we found a reasonable time, write it into *time.
				// After seeking, we will try to read the sought time from
				// the next packet. If the packet has no PTS value, we may
				// still have a more accurate time from the index lookup.
				*time = foundTime;
			}
		}

		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
				searchFlags) < 0) {
			TRACE("  avformat_seek_file() failed.\n");
			// Try to fall back to av_seek_frame()
			timeStamp = _ConvertToStreamTimeBase(timeStamp);
			if (av_seek_frame(fContext, fStream->index, timeStamp,
					searchFlags) < 0) {
				TRACE("  avformat_seek_frame() failed as well.\n");
				// Fall back to seeking to the beginning by bytes
				timeStamp = 0;
				if (av_seek_frame(fContext, fStream->index, timeStamp,
						AVSEEK_FLAG_BYTE) < 0) {
					TRACE("  avformat_seek_frame() by bytes failed as "
						"well.\n");
					// Do not propagate error in any case. We fail if we can't
					// read another packet.
				} else
					*time = 0;
			}
		}

		// Our last packet is toast in any case. Read the next one so
		// we know where we really sought.
		bigtime_t foundTime = *time;

		fReusePacket = false;
		if (_NextPacket(true) == B_OK) {
			if (fPacket.pts != kNoPTSValue)
				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
			else
				TRACE_SEEK("  no PTS in packet after seeking\n");
		} else
			TRACE_SEEK("  _NextPacket() failed!\n");

		*time = foundTime;
		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
		*frame = (uint64)(*time * frameRate / 1000000.0 + 0.5);
		TRACE_SEEK("  sought frame: %lld\n", *frame);
	}

	return B_OK;
}
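
// GetNextChunk() hands out the data of the current AVPacket without copying
// it; the memory stays valid until the next av_read_frame() call in
// _NextPacket(). The media_header start_time prefers the packet DTS so that
// start times increase monotonically even with B-frame reordering.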
status_t
StreamBase::GetNextChunk(const void** chunkBuffer,
	size_t* chunkSize, media_header* mediaHeader)
{
	BAutolock _(fStreamLock);

	TRACE_PACKET("StreamBase::GetNextChunk()\n");

	// Get the last stream DTS before reading the next packet, since
	// then it points to that one.
	int64 lastStreamDTS = fStream->cur_dts;

	status_t ret = _NextPacket(false);
	if (ret != B_OK) {
		*chunkBuffer = NULL;
		*chunkSize = 0;
		return ret;
	}

	// NOTE: AVPacket has a field called "convergence_duration", for which
	// the documentation is quite interesting. It sounds like it could be
	// used to know the time until the next I-Frame in streams that don't
	// let you know the position of keyframes in another way (like through
	// the index).

	// According to libavformat documentation, fPacket is valid until the
	// next call to av_read_frame(). This is what we want and we can share
	// the memory with the least overhead.
	*chunkBuffer = fPacket.data;
	*chunkSize = fPacket.size;

	if (mediaHeader != NULL) {
		mediaHeader->type = fFormat.type;
		mediaHeader->buffer = 0;
		mediaHeader->destination = -1;
		mediaHeader->time_source = -1;
		mediaHeader->size_used = fPacket.size;

		// FFmpeg recommends using the decoding time stamps as primary source
		// for presentation time stamps, especially for video formats that are
		// using frame reordering. Moreover, this way it is ensured that the
		// returned start times are ordered in a monotonically increasing time
		// series (even for videos that contain B-frames).
		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavformat/avformat.h;h=1e8a6294890d580cd9ebc684eaf4ce57c8413bd8;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1623
		bigtime_t presentationTimeStamp;
		if (fPacket.dts != kNoPTSValue)
			presentationTimeStamp = fPacket.dts;
		else if (fPacket.pts != kNoPTSValue)
			presentationTimeStamp = fPacket.pts;
		else
			presentationTimeStamp = lastStreamDTS;

		mediaHeader->start_time
			= _ConvertFromStreamTimeBase(presentationTimeStamp);
		mediaHeader->file_pos = fPacket.pos;
		mediaHeader->data_offset = 0;
		switch (mediaHeader->type) {
			case B_MEDIA_RAW_AUDIO:
				break;
			case B_MEDIA_ENCODED_AUDIO:
				mediaHeader->u.encoded_audio.buffer_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				break;
			case B_MEDIA_RAW_VIDEO:
				mediaHeader->u.raw_video.line_count
					= fFormat.u.raw_video.display.line_count;
				break;
			case B_MEDIA_ENCODED_VIDEO:
				mediaHeader->u.encoded_video.field_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				mediaHeader->u.encoded_video.line_count
					= fFormat.u.encoded_video.output.display.line_count;
				break;
			default:
				break;
		}
	}

//	static bigtime_t pts[2];
//	static bigtime_t lastPrintTime = system_time();
//	static BLocker printLock;
//	if (fStream->index < 2) {
//		if (fPacket.pts != kNoPTSValue)
//			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
//		printLock.Lock();
//		bigtime_t now = system_time();
//		if (now - lastPrintTime > 1000000) {
//			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
//				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
//			lastPrintTime = now;
//		}
//		printLock.Unlock();
//	}

	return B_OK;
}
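
// libavformat read hook. Because several streams share one BMediaIO, each
// StreamBase tracks its own fPosition and re-seeks the source whenever the
// source position no longer matches it.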
/*static*/ int
StreamBase::_Read(void* cookie, uint8* buffer, int bufferSize)
{
	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);

	BAutolock _(stream->fSourceLock);

	TRACE_IO("StreamBase::_Read(%p, %p, %d) position: %lld\n",
		cookie, buffer, bufferSize, stream->fPosition);

	if (stream->fPosition != stream->fSource->Position()) {
		TRACE_IO("StreamBase::_Read fSource position: %lld\n",
			stream->fSource->Position());

		off_t position
			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
		if (position != stream->fPosition)
			return -1;
	}

	ssize_t read = stream->fSource->Read(buffer, bufferSize);
	if (read > 0)
		stream->fPosition += read;

	TRACE_IO("  read: %ld\n", read);
	return read;
}
/*static*/ off_t
StreamBase::_Seek(void* cookie, off_t offset, int whence)
{
	TRACE_IO("StreamBase::_Seek(%p, %lld, %d)\n",
		cookie, offset, whence);

	StreamBase* stream = reinterpret_cast<StreamBase*>(cookie);

	BAutolock _(stream->fSourceLock);

	// Support for special file size retrieval API without seeking
	// anywhere:
	if (whence == AVSEEK_SIZE) {
		off_t size;
		if (stream->fSource->GetSize(&size) == B_OK)
			return size;
		return -1;
	}

	// If not requested to seek to an absolute position, we need to
	// confirm that the stream is currently at the position that we
	// think it is.
	if (whence != SEEK_SET
		&& stream->fPosition != stream->fSource->Position()) {
		off_t position
			= stream->fSource->Seek(stream->fPosition, SEEK_SET);
		if (position != stream->fPosition)
			return -1;
	}

	off_t position = stream->fSource->Seek(offset, whence);
	TRACE_IO("  position: %lld\n", position);
	if (position < 0)
		return -1;

	stream->fPosition = position;

	return position;
}
status_t
StreamBase::_NextPacket(bool reuse)
{
	TRACE_PACKET("StreamBase::_NextPacket(%d)\n", reuse);

	if (fReusePacket) {
		// The last packet was marked for reuse, so we keep using it.
		TRACE_PACKET("  re-using last packet\n");
		fReusePacket = reuse;
		return B_OK;
	}

	av_free_packet(&fPacket);

	while (true) {
		if (av_read_frame(fContext, &fPacket) < 0) {
			// NOTE: Even though we may get the error for a different stream,
			// av_read_frame() is not going to be successful from here on, so
			// it doesn't matter.
			fReusePacket = false;
			return B_LAST_BUFFER_ERROR;
		}

		if (fPacket.stream_index == Index())
			break;

		// This is a packet from another stream, ignore it.
		av_free_packet(&fPacket);
	}

	// Mark this packet with the new reuse flag.
	fReusePacket = reuse;
	return B_OK;
}
int64_t
StreamBase::_ConvertToStreamTimeBase(bigtime_t time) const
{
	int64 timeStamp = int64_t((double)time * fStream->time_base.den
		/ (1000000.0 * fStream->time_base.num) + 0.5);
	if (fStream->start_time != kNoPTSValue)
		timeStamp += fStream->start_time;
	return timeStamp;
}
bigtime_t
StreamBase::_ConvertFromStreamTimeBase(int64_t time) const
{
	if (fStream->start_time != kNoPTSValue)
		time -= fStream->start_time;

	return bigtime_t(1000000.0 * time * fStream->time_base.num
		/ fStream->time_base.den + 0.5);
}
// #pragma mark - AVFormatReader::Stream
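
// AVFormatReader::Stream adds what the Reader interface needs on top of
// StreamBase: building the media_format in Init(), metadata access, and
// keyframe lookup. FindKeyFrame() seeks on a separate "ghost" StreamBase so
// that the decoding position of this stream is not disturbed; the last
// result is cached in fLastReportedKeyframe.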
class AVFormatReader::Stream : public StreamBase {
public:
								Stream(BMediaIO* source,
									BLocker* streamLock);
	virtual						~Stream();

	// Setup this stream to point to the AVStream at the given streamIndex.
	// This will also initialize the media_format.
	virtual	status_t			Init(int32 streamIndex);

			status_t			GetMetaData(BMessage* data);

	// Support for AVFormatReader
			status_t			GetStreamInfo(int64* frameCount,
									bigtime_t* duration, media_format* format,
									const void** infoBuffer,
									size_t* infoSize) const;

			status_t			FindKeyFrame(uint32 flags, int64* frame,
									bigtime_t* time) const;
	virtual	status_t			Seek(uint32 flags, int64* frame,
									bigtime_t* time);

private:
	mutable	BLocker				fLock;

			struct KeyframeInfo {
				bigtime_t		requestedTime;
				int64			requestedFrame;
				bigtime_t		reportedTime;
				int64			reportedFrame;
				uint32			seekFlags;
			};
	mutable	KeyframeInfo		fLastReportedKeyframe;
	mutable	StreamBase*			fGhostStream;
};
AVFormatReader::Stream::Stream(BMediaIO* source, BLocker* streamLock)
	:
	StreamBase(source, streamLock, &fLock),
	fLock("stream lock"),
	fGhostStream(NULL)
{
	fLastReportedKeyframe.requestedTime = 0;
	fLastReportedKeyframe.requestedFrame = 0;
	fLastReportedKeyframe.reportedTime = 0;
	fLastReportedKeyframe.reportedFrame = 0;
}
AVFormatReader::Stream::~Stream()
{
	delete fGhostStream;
}
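
// Init() resolves the AVStream, then translates its AVCodecContext into a
// media_format: raw PCM audio is mapped onto B_MEDIA_RAW_AUDIO, everything
// else is published as encoded audio/video in the 'ffmp' misc format family,
// with any codec extradata attached as meta data.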
status_t
AVFormatReader::Stream::Init(int32 virtualIndex)
{
	TRACE("AVFormatReader::Stream::Init(%ld)\n", virtualIndex);

	status_t ret = StreamBase::Init(virtualIndex);
	if (ret != B_OK)
		return ret;

	// Get a pointer to the AVCodecContext for the stream at streamIndex.
	AVCodecContext* codecContext = fStream->codec;

// stippi: Here I was experimenting with the question if some fields of the
// AVCodecContext change (or get filled out at all), if the AVCodec is opened.
	class CodecOpener {
	public:
		CodecOpener(AVCodecContext* context)
		{
			fCodecContext = context;
			AVCodec* codec = avcodec_find_decoder(context->codec_id);
			fCodecOpen = avcodec_open(context, codec) >= 0;
			if (!fCodecOpen)
				TRACE("  failed to open the codec!\n");
		}
		~CodecOpener()
		{
			if (fCodecOpen)
				avcodec_close(fCodecContext);
		}
	private:
		AVCodecContext*		fCodecContext;
		bool				fCodecOpen;
	} codecOpener(codecContext);

	// initialize the media_format for this stream
	media_format* format = &fFormat;
	memset(format, 0, sizeof(media_format));

	media_format_description description;

	// Set format family and type depending on codec_type of the stream.
	switch (codecContext->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			if ((codecContext->codec_id >= AV_CODEC_ID_PCM_S16LE)
				&& (codecContext->codec_id <= AV_CODEC_ID_PCM_U8)) {
				TRACE("  raw audio\n");
				format->type = B_MEDIA_RAW_AUDIO;
				description.family = B_ANY_FORMAT_FAMILY;
				// This will then apparently be handled by the (built into
				// BMediaTrack) RawDecoder.
			} else {
				TRACE("  encoded audio\n");
				format->type = B_MEDIA_ENCODED_AUDIO;
				description.family = B_MISC_FORMAT_FAMILY;
				description.u.misc.file_format = 'ffmp';
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			TRACE("  encoded video\n");
			format->type = B_MEDIA_ENCODED_VIDEO;
			description.family = B_MISC_FORMAT_FAMILY;
			description.u.misc.file_format = 'ffmp';
			break;
		default:
			TRACE("  unknown type\n");
			format->type = B_MEDIA_UNKNOWN_TYPE;
			return B_ERROR;
	}

	if (format->type == B_MEDIA_RAW_AUDIO) {
		// We cannot describe all raw-audio formats, some are unsupported.
		switch (codecContext->codec_id) {
			case AV_CODEC_ID_PCM_S16LE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_LITTLE_ENDIAN;
				break;
			case AV_CODEC_ID_PCM_S16BE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_BIG_ENDIAN;
				break;
			case AV_CODEC_ID_PCM_U16LE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_LITTLE_ENDIAN;
				return B_NOT_SUPPORTED;

			case AV_CODEC_ID_PCM_U16BE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_BIG_ENDIAN;
				return B_NOT_SUPPORTED;

			case AV_CODEC_ID_PCM_S8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_CHAR;
				break;
			case AV_CODEC_ID_PCM_U8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_UCHAR;
				break;
			default:
				return B_NOT_SUPPORTED;
		}
	}

	if (description.family == B_MISC_FORMAT_FAMILY)
		description.u.misc.codec = codecContext->codec_id;

	BMediaFormats formats;
	status_t status = formats.GetFormatFor(description, format);
	if (status != B_OK)
		TRACE("  formats.GetFormatFor() error: %s\n", strerror(status));

	format->user_data_type = B_CODEC_TYPE_INFO;
	*(uint32*)format->user_data = codecContext->codec_tag;
	format->user_data[4] = 0;

	format->require_flags = 0;
	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	switch (format->type) {
		case B_MEDIA_RAW_AUDIO:
			format->u.raw_audio.frame_rate = (float)codecContext->sample_rate;
			format->u.raw_audio.channel_count = codecContext->channels;
			format->u.raw_audio.channel_mask = codecContext->channel_layout;
			ConvertAVSampleFormatToRawAudioFormat(codecContext->sample_fmt,
				format->u.raw_audio.format);
			format->u.raw_audio.buffer_size = 0;

			// Read one packet and mark it for later re-use. (So our first
			// GetNextChunk() call does not read another packet.)
			if (_NextPacket(true) == B_OK) {
				TRACE("  successfully determined audio buffer size: %d\n",
					fPacket.size);
				format->u.raw_audio.buffer_size = fPacket.size;
			}
			break;

		case B_MEDIA_ENCODED_AUDIO:
			format->u.encoded_audio.bit_rate = codecContext->bit_rate;
			format->u.encoded_audio.frame_size = codecContext->frame_size;
			// Fill in some info about possible output format
			format->u.encoded_audio.output
				= media_multi_audio_format::wildcard;
			format->u.encoded_audio.output.frame_rate
				= (float)codecContext->sample_rate;
			// Channel layout bits match in Be API and FFmpeg.
			format->u.encoded_audio.output.channel_count
				= codecContext->channels;
			format->u.encoded_audio.multi_info.channel_mask
				= codecContext->channel_layout;
			format->u.encoded_audio.output.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			ConvertAVSampleFormatToRawAudioFormat(codecContext->sample_fmt,
				format->u.encoded_audio.output.format);
			if (codecContext->block_align > 0) {
				format->u.encoded_audio.output.buffer_size
					= codecContext->block_align;
			} else {
				format->u.encoded_audio.output.buffer_size
					= codecContext->frame_size * codecContext->channels
						* (format->u.encoded_audio.output.format
							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
			}
			break;

		case B_MEDIA_ENCODED_VIDEO:
// TODO: Specifying any of these seems to throw off the format matching
// later on.
//			format->u.encoded_video.avg_bit_rate = codecContext->bit_rate;
//			format->u.encoded_video.max_bit_rate = codecContext->bit_rate
//				+ codecContext->bit_rate_tolerance;

//			format->u.encoded_video.encoding
//				= media_encoded_video_format::B_ANY;

//			format->u.encoded_video.frame_size = 1;
//			format->u.encoded_video.forward_history = 0;
//			format->u.encoded_video.backward_history = 0;

			format->u.encoded_video.output.field_rate = FrameRate();
			format->u.encoded_video.output.interlace = 1;

			format->u.encoded_video.output.first_active = 0;
			format->u.encoded_video.output.last_active
				= codecContext->height - 1;
			// TODO: Maybe libavformat actually provides that info
			// somewhere...
			format->u.encoded_video.output.orientation
				= B_VIDEO_TOP_LEFT_RIGHT;

			ConvertAVCodecContextToVideoAspectWidthAndHeight(*codecContext,
				format->u.encoded_video.output.pixel_width_aspect,
				format->u.encoded_video.output.pixel_height_aspect);

			format->u.encoded_video.output.display.format
				= pixfmt_to_colorspace(codecContext->pix_fmt);
			format->u.encoded_video.output.display.line_width
				= codecContext->width;
			format->u.encoded_video.output.display.line_count
				= codecContext->height;
			TRACE("  width/height: %d/%d\n", codecContext->width,
				codecContext->height);
			format->u.encoded_video.output.display.bytes_per_row = 0;
			format->u.encoded_video.output.display.pixel_offset = 0;
			format->u.encoded_video.output.display.line_offset = 0;
			format->u.encoded_video.output.display.flags = 0; // TODO
			break;

		default:
			// This is an unknown format to us.
			break;
	}

	// Add the meta data, if any
	if (codecContext->extradata_size > 0) {
		format->SetMetaData(codecContext->extradata,
			codecContext->extradata_size);
		TRACE("  extradata: %p\n", format->MetaData());
	}

	TRACE("  extradata_size: %d\n", codecContext->extradata_size);
//	TRACE("  intra_matrix: %p\n", codecContext->intra_matrix);
//	TRACE("  inter_matrix: %p\n", codecContext->inter_matrix);
//	TRACE("  get_buffer(): %p\n", codecContext->get_buffer);
//	TRACE("  release_buffer(): %p\n", codecContext->release_buffer);

#ifdef TRACE_AVFORMAT_READER
	char formatString[512];
	if (string_for_format(*format, formatString, sizeof(formatString)))
		TRACE("  format: %s\n", formatString);

	uint32 encoding = format->Encoding();
	TRACE("  encoding '%.4s'\n", (char*)&encoding);
#endif // TRACE_AVFORMAT_READER

	return B_OK;
}
status_t
AVFormatReader::Stream::GetMetaData(BMessage* data)
{
	BAutolock _(&fLock);

	avdictionary_to_message(fStream->metadata, data);

	return B_OK;
}
status_t
AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
	bigtime_t* duration, media_format* format, const void** infoBuffer,
	size_t* infoSize) const
{
	BAutolock _(&fLock);

	TRACE("AVFormatReader::Stream::GetStreamInfo(%ld)\n",
		VirtualIndex());

	double frameRate = FrameRate();
	TRACE("  frameRate: %.4f\n", frameRate);

#ifdef TRACE_AVFORMAT_READER
	if (fStream->start_time != kNoPTSValue) {
		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
		TRACE("  start_time: %lld or %.5fs\n", startTime,
			startTime / 1000000.0);
		// TODO: Handle start time in FindKeyFrame() and Seek()?!
	}
#endif // TRACE_AVFORMAT_READER

	*duration = Duration();

	TRACE("  duration: %lld or %.5fs\n", *duration, *duration / 1000000.0);

	if (fStream->nb_index_entries > 0) {
		TRACE("  dump of index entries:\n");
		int count = 5;
		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
		int i = 0;
		for (; i < firstEntriesCount; i++) {
			AVIndexEntry& entry = fStream->index_entries[i];
			bigtime_t timeGlobal = entry.timestamp;
			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
			TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
		}
		if (fStream->nb_index_entries - count > i) {
			i = fStream->nb_index_entries - count;
			for (; i < fStream->nb_index_entries; i++) {
				AVIndexEntry& entry = fStream->index_entries[i];
				bigtime_t timeGlobal = entry.timestamp;
				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
				TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
			}
		}
	}

	*frameCount = fStream->nb_frames * fStream->codec->frame_size;
	if (*frameCount == 0) {
		// Calculate from duration and frame rate
		*frameCount = (int64)(*duration * frameRate / 1000000LL);
		TRACE("  frameCount calculated: %lld, from context: %lld\n",
			*frameCount, fStream->nb_frames);
	} else
		TRACE("  frameCount: %lld\n", *frameCount);

	*format = fFormat;

	*infoBuffer = fStream->codec->extradata;
	*infoSize = fStream->codec->extradata_size;

	return B_OK;
}
status_t
AVFormatReader::Stream::FindKeyFrame(uint32 flags, int64* frame,
	bigtime_t* time) const
{
	BAutolock _(&fLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	TRACE_FIND("AVFormatReader::Stream::FindKeyFrame(%ld,%s%s%s%s, "
		"%lld, %lld)\n", VirtualIndex(),
		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
		*frame, *time);

	bool inLastRequestedRange = false;
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		if (fLastReportedKeyframe.reportedFrame
				<= fLastReportedKeyframe.requestedFrame) {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.reportedFrame
					&& *frame <= fLastReportedKeyframe.requestedFrame;
		} else {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.requestedFrame
					&& *frame <= fLastReportedKeyframe.reportedFrame;
		}
	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
		if (fLastReportedKeyframe.reportedTime
				<= fLastReportedKeyframe.requestedTime) {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.reportedTime
					&& *time <= fLastReportedKeyframe.requestedTime;
		} else {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.requestedTime
					&& *time <= fLastReportedKeyframe.reportedTime;
		}
	}

	if (inLastRequestedRange) {
		*frame = fLastReportedKeyframe.reportedFrame;
		*time = fLastReportedKeyframe.reportedTime;
		TRACE_FIND("  same as last reported keyframe\n");
		return B_OK;
	}

	double frameRate = FrameRate();
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0)
		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);

	status_t ret;
	if (fGhostStream == NULL) {
		BAutolock _(fSourceLock);

		fGhostStream = new(std::nothrow) StreamBase(fSource, fSourceLock,
			fStreamLock);
		if (fGhostStream == NULL) {
			TRACE("  failed to allocate ghost stream\n");
			return B_NO_MEMORY;
		}

		ret = fGhostStream->Open();
		if (ret != B_OK) {
			TRACE("  ghost stream failed to open: %s\n", strerror(ret));
			return B_ERROR;
		}

		ret = fGhostStream->Init(fVirtualIndex);
		if (ret != B_OK) {
			TRACE("  ghost stream failed to init: %s\n", strerror(ret));
			return B_ERROR;
		}
	}

	fLastReportedKeyframe.requestedFrame = *frame;
	fLastReportedKeyframe.requestedTime = *time;
	fLastReportedKeyframe.seekFlags = flags;

	ret = fGhostStream->Seek(flags, frame, time);
	if (ret != B_OK) {
		TRACE("  ghost stream failed to seek: %s\n", strerror(ret));
		return B_ERROR;
	}

	fLastReportedKeyframe.reportedFrame = *frame;
	fLastReportedKeyframe.reportedTime = *time;

	TRACE_FIND("  found time: %.2fs\n", *time / 1000000.0);
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		*frame = int64_t(*time * FrameRate() / 1000000.0 + 0.5);
		TRACE_FIND("  found frame: %lld\n", *frame);
	}

	return B_OK;
}
status_t
AVFormatReader::Stream::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(&fLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	// Put the old requested values into frame/time, since we already know
	// that the sought frame/time will then match the reported values.
	// TODO: Will not work if client changes seek flags (from backwards to
	// forward or vice versa)!!
	bool inLastRequestedRange = false;
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		if (fLastReportedKeyframe.reportedFrame
				<= fLastReportedKeyframe.requestedFrame) {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.reportedFrame
					&& *frame <= fLastReportedKeyframe.requestedFrame;
		} else {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.requestedFrame
					&& *frame <= fLastReportedKeyframe.reportedFrame;
		}
	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
		if (fLastReportedKeyframe.reportedTime
				<= fLastReportedKeyframe.requestedTime) {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.reportedTime
					&& *time <= fLastReportedKeyframe.requestedTime;
		} else {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.requestedTime
					&& *time <= fLastReportedKeyframe.reportedTime;
		}
	}

	if (inLastRequestedRange) {
		*frame = fLastReportedKeyframe.requestedFrame;
		*time = fLastReportedKeyframe.requestedTime;
		flags = fLastReportedKeyframe.seekFlags;
	}

	return StreamBase::Seek(flags, frame, time);
}
// #pragma mark - AVFormatReader
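
// AVFormatReader itself is the Reader plugin entry point: Sniff() probes the
// source and allocates the Stream array, AllocateCookie() hands out one
// Stream per virtual index, and the remaining hooks simply forward to that
// cookie. A minimal sketch of how a client typically ends up exercising
// these hooks through the public BMediaFile/BMediaTrack API (the entry_ref
// and the omitted error handling are placeholders, not part of this add-on):
//
//	entry_ref ref;				// assumed to point at some media file
//	BMediaFile mediaFile(&ref);		// -> Sniff()
//	if (mediaFile.InitCheck() == B_OK) {
//		for (int32 i = 0; i < mediaFile.CountTracks(); i++) {
//			BMediaTrack* track = mediaFile.TrackAt(i);	// -> AllocateCookie()
//			media_format format;
//			track->EncodedFormat(&format);			// -> GetStreamInfo()
//			mediaFile.ReleaseTrack(track);			// -> FreeCookie()
//		}
//	}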
AVFormatReader::AVFormatReader()
	:
	fCopyright(""),
	fStreams(NULL),
	fSourceLock("source I/O lock")
{
	TRACE("AVFormatReader::AVFormatReader\n");
}
AVFormatReader::~AVFormatReader()
{
	TRACE("AVFormatReader::~AVFormatReader\n");
	if (fStreams != NULL) {
		// The client was supposed to call FreeCookie() on all
		// allocated streams. Deleting the first stream is always
		// prevented, we delete the other ones just in case.
		int32 count = fStreams[0]->CountStreams();
		for (int32 i = 0; i < count; i++)
			delete fStreams[i];
		delete[] fStreams;
	}
}
const char*
AVFormatReader::Copyright()
{
	if (fCopyright.Length() <= 0) {
		BMessage message;
		if (GetMetaData(&message) == B_OK)
			message.FindString("copyright", &fCopyright);
	}

	return fCopyright.String();
}
status_t
AVFormatReader::Sniff(int32* _streamCount)
{
	TRACE("AVFormatReader::Sniff\n");

	BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
	if (source == NULL) {
		TRACE("  not a BMediaIO, but we need it to be one.\n");
		return B_NOT_SUPPORTED;
	}

	Stream* stream = new(std::nothrow) Stream(source,
		&fSourceLock);
	if (stream == NULL) {
		ERROR("AVFormatReader::Sniff() - failed to allocate Stream\n");
		return B_NO_MEMORY;
	}

	ObjectDeleter<Stream> streamDeleter(stream);

	status_t ret = stream->Open();
	if (ret != B_OK) {
		TRACE("  failed to detect stream: %s\n", strerror(ret));
		return ret;
	}

	int32 streamCount = stream->CountStreams();
	if (streamCount == 0) {
		TRACE("  failed to detect any streams: %s\n", strerror(ret));
		return B_ERROR;
	}

	fStreams = new(std::nothrow) Stream*[streamCount];
	if (fStreams == NULL) {
		ERROR("AVFormatReader::Sniff() - failed to allocate streams\n");
		return B_NO_MEMORY;
	}

	memset(fStreams, 0, sizeof(Stream*) * streamCount);
	fStreams[0] = stream;
	streamDeleter.Detach();

#ifdef TRACE_AVFORMAT_READER
	av_dump_format(const_cast<AVFormatContext*>(stream->Context()), 0, "", 0);
#endif

	if (_streamCount != NULL)
		*_streamCount = streamCount;

	return B_OK;
}
status_t
AVFormatReader::GetFileFormatInfo(media_file_format* mff)
{
	TRACE("AVFormatReader::GetFileFormatInfo\n");

	if (fStreams == NULL)
		return B_NO_INIT;

	// The first cookie is always there!
	const AVFormatContext* context = fStreams[0]->Context();

	if (context == NULL || context->iformat == NULL) {
		TRACE("  no AVFormatContext or AVInputFormat!\n");
		return B_NO_INIT;
	}

	const media_file_format* format = demuxer_format_for(context->iformat);

	mff->capabilities = media_file_format::B_READABLE
		| media_file_format::B_KNOWS_ENCODED_VIDEO
		| media_file_format::B_KNOWS_ENCODED_AUDIO
		| media_file_format::B_IMPERFECTLY_SEEKABLE;

	if (format != NULL) {
		mff->family = format->family;
	} else {
		TRACE("  no DemuxerFormat for AVInputFormat!\n");
		mff->family = B_MISC_FORMAT_FAMILY;
	}

	if (format != NULL) {
		strcpy(mff->mime_type, format->mime_type);
	} else {
		// TODO: Would be nice to be able to provide this from AVInputFormat,
		// maybe by extending the FFmpeg code itself (all demuxers).
		strcpy(mff->mime_type, "");
	}

	if (context->iformat->extensions != NULL)
		strcpy(mff->file_extension, context->iformat->extensions);
	else {
		TRACE("  no file extensions for AVInputFormat.\n");
		strcpy(mff->file_extension, "");
	}

	if (context->iformat->name != NULL)
		strcpy(mff->short_name, context->iformat->name);
	else {
		TRACE("  no short name for AVInputFormat.\n");
		strcpy(mff->short_name, "");
	}

	if (context->iformat->long_name != NULL)
		sprintf(mff->pretty_name, "%s (FFmpeg)", context->iformat->long_name);
	else {
		if (format != NULL)
			sprintf(mff->pretty_name, "%s (FFmpeg)", format->pretty_name);
		else
			strcpy(mff->pretty_name, "Unknown (FFmpeg)");
	}

	return B_OK;
}
status_t
AVFormatReader::GetMetaData(BMessage* _data)
{
	// The first cookie is always there!
	const AVFormatContext* context = fStreams[0]->Context();

	if (context == NULL)
		return B_NO_INIT;

	avdictionary_to_message(context->metadata, _data);

	for (unsigned i = 0; i < context->nb_chapters; i++) {
		AVChapter* chapter = context->chapters[i];
		BMessage chapterData;
		chapterData.AddInt64("start", bigtime_t(1000000.0
			* chapter->start * chapter->time_base.num
			/ chapter->time_base.den + 0.5));
		chapterData.AddInt64("end", bigtime_t(1000000.0
			* chapter->end * chapter->time_base.num
			/ chapter->time_base.den + 0.5));

		avdictionary_to_message(chapter->metadata, &chapterData);
		_data->AddMessage("be:chapter", &chapterData);
	}

	for (unsigned i = 0; i < context->nb_programs; i++) {
		BMessage programData;
		avdictionary_to_message(context->programs[i]->metadata, &programData);
		_data->AddMessage("be:program", &programData);
	}

	return B_OK;
}
status_t
AVFormatReader::AllocateCookie(int32 streamIndex, void** _cookie)
{
	TRACE("AVFormatReader::AllocateCookie(%ld)\n", streamIndex);

	BAutolock _(fSourceLock);

	if (fStreams == NULL)
		return B_NO_INIT;

	if (streamIndex < 0 || streamIndex >= fStreams[0]->CountStreams())
		return B_BAD_INDEX;

	if (_cookie == NULL)
		return B_BAD_VALUE;

	Stream* cookie = fStreams[streamIndex];
	if (cookie == NULL) {
		// Allocate the cookie
		BMediaIO* source = dynamic_cast<BMediaIO*>(Source());
		if (source == NULL) {
			TRACE("  not a BMediaIO, but we need it to be one.\n");
			return B_NOT_SUPPORTED;
		}

		cookie = new(std::nothrow) Stream(source, &fSourceLock);
		if (cookie == NULL) {
			ERROR("AVFormatReader::Sniff() - failed to allocate "
				"Stream\n");
			return B_NO_MEMORY;
		}

		status_t ret = cookie->Open();
		if (ret != B_OK) {
			TRACE("  stream failed to open: %s\n", strerror(ret));
			delete cookie;
			return ret;
		}
	}

	status_t ret = cookie->Init(streamIndex);
	if (ret != B_OK) {
		TRACE("  stream failed to initialize: %s\n", strerror(ret));
		// NOTE: Never delete the first stream!
		if (streamIndex != 0)
			delete cookie;
		return ret;
	}

	fStreams[streamIndex] = cookie;
	*_cookie = cookie;

	return B_OK;
}
status_t
AVFormatReader::FreeCookie(void *_cookie)
{
	BAutolock _(fSourceLock);

	Stream* cookie = reinterpret_cast<Stream*>(_cookie);

	// NOTE: Never delete the first cookie!
	if (cookie != NULL && cookie->VirtualIndex() != 0) {
		if (fStreams != NULL)
			fStreams[cookie->VirtualIndex()] = NULL;
		delete cookie;
	}

	return B_OK;
}
status_t
AVFormatReader::GetStreamInfo(void* _cookie, int64* frameCount,
	bigtime_t* duration, media_format* format, const void** infoBuffer,
	size_t* infoSize)
{
	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
	return cookie->GetStreamInfo(frameCount, duration, format, infoBuffer,
		infoSize);
}
status_t
AVFormatReader::GetStreamMetaData(void* _cookie, BMessage* _data)
{
	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
	return cookie->GetMetaData(_data);
}
status_t
AVFormatReader::Seek(void* _cookie, uint32 seekTo, int64* frame,
	bigtime_t* time)
{
	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
	return cookie->Seek(seekTo, frame, time);
}
status_t
AVFormatReader::FindKeyFrame(void* _cookie, uint32 flags, int64* frame,
	bigtime_t* time)
{
	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
	return cookie->FindKeyFrame(flags, frame, time);
}
status_t
AVFormatReader::GetNextChunk(void* _cookie, const void** chunkBuffer,
	size_t* chunkSize, media_header* mediaHeader)
{
	Stream* cookie = reinterpret_cast<Stream*>(_cookie);
	return cookie->GetNextChunk(chunkBuffer, chunkSize, mediaHeader);
}