// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/filters/ffmpeg_audio_decoder.h"

#include "base/callback_helpers.h"
#include "base/single_thread_task_runner.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_discard_helper.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/limits.h"
#include "media/base/timestamp_constants.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"

namespace media {

// Returns true if the decode result was end of stream.
static inline bool IsEndOfStream(int result,
                                 int decoded_size,
                                 const scoped_refptr<DecoderBuffer>& input) {
  // Three conditions to meet to declare end of stream for this decoder:
  // 1. FFmpeg didn't read anything.
  // 2. FFmpeg didn't output anything.
  // 3. An end of stream buffer is received.
  return result == 0 && decoded_size == 0 && input->end_of_stream();
}

// Return the number of channels from the data in |frame|.
static inline int DetermineChannels(AVFrame* frame) {
#if defined(CHROMIUM_NO_AVFRAME_CHANNELS)
  // When use_system_ffmpeg==1, libav's AVFrame doesn't have channels field.
  return av_get_channel_layout_nb_channels(frame->channel_layout);
#else
  return frame->channels;
#endif
}

// Called by FFmpeg's allocation routine to free a buffer. |opaque| is the
// AudioBuffer allocated, so unref it.
static void ReleaseAudioBufferImpl(void* opaque, uint8* data) {
  // Adopt the raw reference stored in |opaque| into a scoped_refptr; it is
  // released when |buffer| goes out of scope.
  scoped_refptr<AudioBuffer> buffer;
  buffer.swap(reinterpret_cast<AudioBuffer**>(&opaque));
}

// Called by FFmpeg's allocation routine to allocate a buffer. Uses
// AVCodecContext.opaque to get the object reference in order to call
// GetAudioBuffer() to do the actual allocation.
static int GetAudioBuffer(struct AVCodecContext* s, AVFrame* frame, int flags) {
  DCHECK(s->codec->capabilities & CODEC_CAP_DR1);
  DCHECK_EQ(s->codec_type, AVMEDIA_TYPE_AUDIO);

  // Since this routine is called by FFmpeg when a buffer is required for audio
  // data, use the values supplied by FFmpeg (ignoring the current settings).
  // FFmpegDecode() gets to determine if the buffer is useable or not.
  AVSampleFormat format = static_cast<AVSampleFormat>(frame->format);
  SampleFormat sample_format = AVSampleFormatToSampleFormat(format);
  int channels = DetermineChannels(frame);
  if (channels <= 0 || channels >= limits::kMaxChannels) {
    DLOG(ERROR) << "Requested number of channels (" << channels
                << ") exceeds limit.";
    return AVERROR(EINVAL);
  }

  int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
  if (frame->nb_samples <= 0)
    return AVERROR(EINVAL);

  if (s->channels != channels) {
    DLOG(ERROR) << "AVCodecContext and AVFrame disagree on channel count.";
    return AVERROR(EINVAL);
  }

  // Determine how big the buffer should be and allocate it. FFmpeg may adjust
  // how big each channel data is in order to meet the alignment policy, so
  // we need to take this into consideration.
  int buffer_size_in_bytes =
      av_samples_get_buffer_size(&frame->linesize[0],
                                 channels,
                                 frame->nb_samples,
                                 format,
                                 AudioBuffer::kChannelAlignment);
  // Check for errors from av_samples_get_buffer_size().
  if (buffer_size_in_bytes < 0)
    return buffer_size_in_bytes;
  int frames_required = buffer_size_in_bytes / bytes_per_channel / channels;
  DCHECK_GE(frames_required, frame->nb_samples);
  scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateBuffer(
      sample_format,
      ChannelLayoutToChromeChannelLayout(s->channel_layout, s->channels),
      channels,
      s->sample_rate,
      frames_required);

  // Initialize the data[] and extended_data[] fields to point into the memory
  // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved
  // audio and equal to |channels| for planar audio.
  int number_of_planes = buffer->channel_data().size();
  if (number_of_planes <= AV_NUM_DATA_POINTERS) {
    DCHECK_EQ(frame->extended_data, frame->data);
    for (int i = 0; i < number_of_planes; ++i)
      frame->data[i] = buffer->channel_data()[i];
  } else {
    // There are more channels than can fit into data[], so allocate
    // extended_data[] and fill appropriately.
    frame->extended_data = static_cast<uint8**>(
        av_malloc(number_of_planes * sizeof(*frame->extended_data)));
    int i = 0;
    for (; i < AV_NUM_DATA_POINTERS; ++i)
      frame->extended_data[i] = frame->data[i] = buffer->channel_data()[i];
    for (; i < number_of_planes; ++i)
      frame->extended_data[i] = buffer->channel_data()[i];
  }

  // Now create an AVBufferRef for the data just allocated. It will own the
  // reference to the AudioBuffer object.
  void* opaque = NULL;
  buffer.swap(reinterpret_cast<AudioBuffer**>(&opaque));
  frame->buf[0] = av_buffer_create(
      frame->data[0], buffer_size_in_bytes, ReleaseAudioBufferImpl, opaque, 0);
  return 0;
}

FFmpegAudioDecoder::FFmpegAudioDecoder(
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
    const scoped_refptr<MediaLog>& media_log)
    : task_runner_(task_runner),
      state_(kUninitialized),
      av_sample_format_(0),
      media_log_(media_log) {
}

FFmpegAudioDecoder::~FFmpegAudioDecoder() {
  DCHECK(task_runner_->BelongsToCurrentThread());

  if (state_ != kUninitialized)
    ReleaseFFmpegResources();
}

std::string FFmpegAudioDecoder::GetDisplayName() const {
  return "FFmpegAudioDecoder";
}

void FFmpegAudioDecoder::Initialize(const AudioDecoderConfig& config,
                                    const InitCB& init_cb,
                                    const OutputCB& output_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(!config.is_encrypted());

  FFmpegGlue::InitializeFFmpeg();

  config_ = config;
  InitCB bound_init_cb = BindToCurrentLoop(init_cb);

  if (!config.IsValidConfig() || !ConfigureDecoder()) {
    bound_init_cb.Run(false);
    return;
  }

  output_cb_ = BindToCurrentLoop(output_cb);
  state_ = kNormal;
  bound_init_cb.Run(true);
}

void FFmpegAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
                                const DecodeCB& decode_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(!decode_cb.is_null());
  CHECK_NE(state_, kUninitialized);
  DecodeCB decode_cb_bound = BindToCurrentLoop(decode_cb);

  if (state_ == kError) {
    decode_cb_bound.Run(kDecodeError);
    return;
  }

  // Do nothing if decoding has finished.
  if (state_ == kDecodeFinished) {
    decode_cb_bound.Run(kOk);
    return;
  }

  DecodeBuffer(buffer, decode_cb_bound);
}

void FFmpegAudioDecoder::Reset(const base::Closure& closure) {
  DCHECK(task_runner_->BelongsToCurrentThread());

  avcodec_flush_buffers(codec_context_.get());
  state_ = kNormal;
  ResetTimestampState();
  task_runner_->PostTask(FROM_HERE, closure);
}
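
// Runs one Decode() request: hands |buffer| to FFmpegDecode() and, for an
// end-of-stream buffer, keeps calling it until the decoder has no more
// buffered frames left to flush.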
void FFmpegAudioDecoder::DecodeBuffer(
    const scoped_refptr<DecoderBuffer>& buffer,
    const DecodeCB& decode_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK_NE(state_, kUninitialized);
  DCHECK_NE(state_, kDecodeFinished);
  DCHECK_NE(state_, kError);
  DCHECK(buffer.get());

  // Make sure we are notified if http://crbug.com/49709 returns. Issue also
  // occurs with some damaged files.
  if (!buffer->end_of_stream() && buffer->timestamp() == kNoTimestamp()) {
    DVLOG(1) << "Received a buffer without timestamps!";
    decode_cb.Run(kDecodeError);
    return;
  }

  bool has_produced_frame;
  do {
    has_produced_frame = false;
    if (!FFmpegDecode(buffer, &has_produced_frame)) {
      state_ = kError;
      decode_cb.Run(kDecodeError);
      return;
    }
    // Repeat to flush the decoder after receiving EOS buffer.
  } while (buffer->end_of_stream() && has_produced_frame);

  if (buffer->end_of_stream())
    state_ = kDecodeFinished;

  decode_cb.Run(kOk);
}
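
// Packs |buffer| into an AVPacket and decodes until the packet is exhausted.
// Returns false on an unrecoverable decode error; sets |*has_produced_frame|
// and invokes |output_cb_| for every decoded buffer that survives the discard
// helper.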
bool FFmpegAudioDecoder::FFmpegDecode(
    const scoped_refptr<DecoderBuffer>& buffer,
    bool* has_produced_frame) {
  DCHECK(!*has_produced_frame);

  AVPacket packet;
  av_init_packet(&packet);
  if (buffer->end_of_stream()) {
    packet.data = NULL;
    packet.size = 0;
  } else {
    packet.data = const_cast<uint8*>(buffer->data());
    packet.size = buffer->data_size();
  }

  // Each audio packet may contain several frames, so we must call the decoder
  // until we've exhausted the packet. Regardless of the packet size we always
  // want to hand it to the decoder at least once, otherwise we would end up
  // skipping end of stream packets since they have a size of zero.
  do {
    int frame_decoded = 0;
    const int result = avcodec_decode_audio4(
        codec_context_.get(), av_frame_.get(), &frame_decoded, &packet);

    if (result < 0) {
      DCHECK(!buffer->end_of_stream())
          << "End of stream buffer produced an error! "
          << "This is quite possibly a bug in the audio decoder not handling "
          << "end of stream AVPackets correctly.";

      MEDIA_LOG(DEBUG, media_log_)
          << "Dropping audio frame which failed decode with timestamp: "
          << buffer->timestamp().InMicroseconds()
          << " us, duration: " << buffer->duration().InMicroseconds()
          << " us, packet size: " << buffer->data_size() << " bytes";

      break;
    }

    // Update packet size and data pointer in case we need to call the decoder
    // with the remaining bytes from this packet.
    packet.size -= result;
    packet.data += result;

    scoped_refptr<AudioBuffer> output;
    const int channels = DetermineChannels(av_frame_.get());
    if (frame_decoded) {
      if (av_frame_->sample_rate != config_.samples_per_second() ||
          channels != ChannelLayoutToChannelCount(config_.channel_layout()) ||
          av_frame_->format != av_sample_format_) {
        DLOG(ERROR) << "Unsupported midstream configuration change!"
                    << " Sample Rate: " << av_frame_->sample_rate << " vs "
                    << config_.samples_per_second()
                    << ", Channels: " << channels << " vs "
                    << ChannelLayoutToChannelCount(config_.channel_layout())
                    << ", Sample Format: " << av_frame_->format << " vs "
                    << av_sample_format_;
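
        // A decoded sample rate of exactly twice the configured rate on AAC
        // content is the classic signature of implicitly signalled HE-AAC
        // (SBR), so leave a hint in the media log before failing.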
        if (config_.codec() == kCodecAAC &&
            av_frame_->sample_rate == 2 * config_.samples_per_second()) {
          MEDIA_LOG(DEBUG, media_log_)
              << "Implicit HE-AAC signalling is being"
              << " used. Please use mp4a.40.5 instead of"
              << " mp4a.40.2 in the mimetype.";
        }
        // This is an unrecoverable error, so bail out.
        av_frame_unref(av_frame_.get());
        return false;
      }

      // Get the AudioBuffer that the data was decoded into. Adjust the number
      // of frames, in case fewer than requested were actually decoded.
      output = reinterpret_cast<AudioBuffer*>(
          av_buffer_get_opaque(av_frame_->buf[0]));

      DCHECK_EQ(ChannelLayoutToChannelCount(config_.channel_layout()),
                output->channel_count());
      const int unread_frames = output->frame_count() - av_frame_->nb_samples;
      DCHECK_GE(unread_frames, 0);
      if (unread_frames > 0)
        output->TrimEnd(unread_frames);
      av_frame_unref(av_frame_.get());
    }

    // WARNING: |av_frame_| no longer has valid data at this point.
    const int decoded_frames = frame_decoded ? output->frame_count() : 0;
    if (IsEndOfStream(result, decoded_frames, buffer)) {
      DCHECK_EQ(packet.size, 0);
    } else if (discard_helper_->ProcessBuffers(buffer, output)) {
      *has_produced_frame = true;
      output_cb_.Run(output);
    }
  } while (packet.size > 0);

  return true;
}

void FFmpegAudioDecoder::ReleaseFFmpegResources() {
  codec_context_.reset();
  av_frame_.reset();
}
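
// (Re)creates the AVCodecContext from |config_| and opens the FFmpeg decoder.
// Returns false, releasing any partially initialized state, if the config is
// invalid, encrypted, or rejected by FFmpeg.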
bool FFmpegAudioDecoder::ConfigureDecoder() {
  if (!config_.IsValidConfig()) {
    DLOG(ERROR) << "Invalid audio stream -"
                << " codec: " << config_.codec()
                << " channel layout: " << config_.channel_layout()
                << " bits per channel: " << config_.bits_per_channel()
                << " samples per second: " << config_.samples_per_second();
    return false;
  }

  if (config_.is_encrypted()) {
    DLOG(ERROR) << "Encrypted audio stream not supported";
    return false;
  }

  // Release existing decoder resources if necessary.
  ReleaseFFmpegResources();

  // Initialize AVCodecContext structure.
  codec_context_.reset(avcodec_alloc_context3(NULL));
  AudioDecoderConfigToAVCodecContext(config_, codec_context_.get());
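
  // Route FFmpeg's audio buffer allocations through GetAudioBuffer() so that
  // decoded samples land directly in a media::AudioBuffer, and use refcounted
  // frames so the AVBufferRef created there keeps that AudioBuffer alive until
  // FFmpeg releases the frame.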
  codec_context_->opaque = this;
  codec_context_->get_buffer2 = GetAudioBuffer;
  codec_context_->refcounted_frames = 1;

  AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
  if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
    DLOG(ERROR) << "Could not initialize audio decoder: "
                << codec_context_->codec_id;
    ReleaseFFmpegResources();
    state_ = kUninitialized;
    return false;
  }

  av_frame_.reset(av_frame_alloc());
  av_sample_format_ = codec_context_->sample_fmt;
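
  // The decode loop DCHECKs that decoded buffers match |config_|'s channel
  // count, so reject streams where FFmpeg disagrees with the demuxed
  // configuration up front.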
  if (codec_context_->channels !=
      ChannelLayoutToChannelCount(config_.channel_layout())) {
    DLOG(ERROR) << "Audio configuration specified "
                << ChannelLayoutToChannelCount(config_.channel_layout())
                << " channels, but FFmpeg thinks the file contains "
                << codec_context_->channels << " channels";
    ReleaseFFmpegResources();
    state_ = kUninitialized;
    return false;
  }

  ResetTimestampState();
  return true;
}
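
// Recreates the AudioDiscardHelper so timestamp tracking and codec delay
// trimming start fresh for the next run of decoded output.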
void FFmpegAudioDecoder::ResetTimestampState() {
  discard_helper_.reset(new AudioDiscardHelper(config_.samples_per_second(),
                                               config_.codec_delay()));
  discard_helper_->Reset(config_.codec_delay());
}

}  // namespace media