// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/filters/ffmpeg_audio_decoder.h"

#include "base/callback_helpers.h"
#include "base/single_thread_task_runner.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_discard_helper.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/limits.h"
#include "media/base/sample_format.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"

namespace media {

// Returns true if the decode result was end of stream.
static inline bool IsEndOfStream(int result,
                                 int decoded_size,
                                 const scoped_refptr<DecoderBuffer>& input) {
  // Three conditions to meet to declare end of stream for this decoder:
  // 1. FFmpeg didn't read anything.
  // 2. FFmpeg didn't output anything.
  // 3. An end of stream buffer is received.
  return result == 0 && decoded_size == 0 && input->end_of_stream();
}

// Return the number of channels from the data in |frame|.
static inline int DetermineChannels(AVFrame* frame) {
#if defined(CHROMIUM_NO_AVFRAME_CHANNELS)
  // When use_system_ffmpeg==1, libav's AVFrame doesn't have channels field.
  return av_get_channel_layout_nb_channels(frame->channel_layout);
#else
  return frame->channels;
#endif
}

// Called by FFmpeg's allocation routine to free a buffer. |opaque| is the
// AudioBuffer allocated, so unref it.
static void ReleaseAudioBufferImpl(void* opaque, uint8* data) {
  scoped_refptr<AudioBuffer> buffer;
  buffer.swap(reinterpret_cast<AudioBuffer**>(&opaque));
}

// Called by FFmpeg's allocation routine to allocate a buffer. Uses
// AVCodecContext.opaque to get the object reference in order to call
// GetAudioBuffer() to do the actual allocation.
static int GetAudioBuffer(struct AVCodecContext* s, AVFrame* frame,
                          int flags) {
  DCHECK(s->codec->capabilities & CODEC_CAP_DR1);
  DCHECK_EQ(s->codec_type, AVMEDIA_TYPE_AUDIO);

  // Since this routine is called by FFmpeg when a buffer is required for audio
  // data, use the values supplied by FFmpeg (ignoring the current settings).
  // FFmpegDecode() gets to determine if the buffer is useable or not.
  AVSampleFormat format = static_cast<AVSampleFormat>(frame->format);
  SampleFormat sample_format = AVSampleFormatToSampleFormat(format);
  int channels = DetermineChannels(frame);
  if (channels <= 0 || channels >= limits::kMaxChannels) {
    DLOG(ERROR) << "Requested number of channels (" << channels
                << ") exceeds limit.";
    return AVERROR(EINVAL);
  }

  int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
  if (frame->nb_samples <= 0)
    return AVERROR(EINVAL);

  if (s->channels != channels) {
    DLOG(ERROR) << "AVCodecContext and AVFrame disagree on channel count.";
    return AVERROR(EINVAL);
  }

  // Determine how big the buffer should be and allocate it. FFmpeg may adjust
  // how big each channel data is in order to meet the alignment policy, so
  // we need to take this into consideration.
  int buffer_size_in_bytes =
      av_samples_get_buffer_size(&frame->linesize[0],
                                 channels,
                                 frame->nb_samples,
                                 format,
                                 AudioBuffer::kChannelAlignment);
  // Check for errors from av_samples_get_buffer_size().
  if (buffer_size_in_bytes < 0)
    return buffer_size_in_bytes;
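  // Because of the alignment padding above, the allocated buffer may hold a
  // few more frames than FFmpeg asked for; the extra frames are trimmed off
  // in FFmpegDecode() once the real sample count is known.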
  int frames_required = buffer_size_in_bytes / bytes_per_channel / channels;
  DCHECK_GE(frames_required, frame->nb_samples);
  scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateBuffer(
      sample_format,
      ChannelLayoutToChromeChannelLayout(s->channel_layout, s->channels),
      channels,
      s->sample_rate,
      frames_required);

  // Initialize the data[] and extended_data[] fields to point into the memory
  // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved
  // audio and equal to |channels| for planar audio.
  int number_of_planes = buffer->channel_data().size();
  if (number_of_planes <= AV_NUM_DATA_POINTERS) {
    DCHECK_EQ(frame->extended_data, frame->data);
    for (int i = 0; i < number_of_planes; ++i)
      frame->data[i] = buffer->channel_data()[i];
  } else {
    // There are more channels than can fit into data[], so allocate
    // extended_data[] and fill appropriately.
    frame->extended_data = static_cast<uint8**>(
        av_malloc(number_of_planes * sizeof(*frame->extended_data)));
    int i = 0;
    for (; i < AV_NUM_DATA_POINTERS; ++i)
      frame->extended_data[i] = frame->data[i] = buffer->channel_data()[i];
    for (; i < number_of_planes; ++i)
      frame->extended_data[i] = buffer->channel_data()[i];
  }

  // Now create an AVBufferRef for the data just allocated. It will own the
  // reference to the AudioBuffer object.
  void* opaque = NULL;
  buffer.swap(reinterpret_cast<AudioBuffer**>(&opaque));
  frame->buf[0] = av_buffer_create(
      frame->data[0], buffer_size_in_bytes, ReleaseAudioBufferImpl, opaque, 0);
  return 0;
}

FFmpegAudioDecoder::FFmpegAudioDecoder(
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
    const LogCB& log_cb)
    : task_runner_(task_runner),
      state_(kUninitialized),
      av_sample_format_(0),
      log_cb_(log_cb) {
}

FFmpegAudioDecoder::~FFmpegAudioDecoder() {
  DCHECK(task_runner_->BelongsToCurrentThread());

  if (state_ != kUninitialized) {
    ReleaseFFmpegResources();
    ResetTimestampState();
  }
}

void FFmpegAudioDecoder::Initialize(const AudioDecoderConfig& config,
                                    const PipelineStatusCB& status_cb,
                                    const OutputCB& output_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(!config.is_encrypted());

  FFmpegGlue::InitializeFFmpeg();

  config_ = config;
  PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb);

  if (!config.IsValidConfig() || !ConfigureDecoder()) {
    initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
    return;
  }

  output_cb_ = BindToCurrentLoop(output_cb);
  state_ = kNormal;
  initialize_cb.Run(PIPELINE_OK);
}

void FFmpegAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
                                const DecodeCB& decode_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(!decode_cb.is_null());
  CHECK_NE(state_, kUninitialized);
  DecodeCB decode_cb_bound = BindToCurrentLoop(decode_cb);

  if (state_ == kError) {
    decode_cb_bound.Run(kDecodeError);
    return;
  }

  // Do nothing if decoding has finished.
  if (state_ == kDecodeFinished) {
    decode_cb_bound.Run(kOk);
    return;
  }

  DecodeBuffer(buffer, decode_cb_bound);
}

void FFmpegAudioDecoder::Reset(const base::Closure& closure) {
  DCHECK(task_runner_->BelongsToCurrentThread());
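
  // Flushing discards any frames FFmpeg is still holding internally, so the
  // next Decode() starts from a clean decoder.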
  avcodec_flush_buffers(codec_context_.get());
  state_ = kNormal;
  ResetTimestampState();
  task_runner_->PostTask(FROM_HERE, closure);
}

void FFmpegAudioDecoder::DecodeBuffer(
    const scoped_refptr<DecoderBuffer>& buffer,
    const DecodeCB& decode_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK_NE(state_, kUninitialized);
  DCHECK_NE(state_, kDecodeFinished);
  DCHECK_NE(state_, kError);

  // Make sure we are notified if http://crbug.com/49709 returns. Issue also
  // occurs with some damaged files.
  if (!buffer->end_of_stream() && buffer->timestamp() == kNoTimestamp()) {
    DVLOG(1) << "Received a buffer without timestamps!";
    decode_cb.Run(kDecodeError);
    return;
  }
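
  // A regular buffer is handed to FFmpeg once. An end of stream buffer is
  // resent until the decoder stops producing output, which drains any frames
  // still queued inside FFmpeg.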
  bool has_produced_frame;
  do {
    has_produced_frame = false;
    if (!FFmpegDecode(buffer, &has_produced_frame)) {
      state_ = kError;
      decode_cb.Run(kDecodeError);
      return;
    }
    // Repeat to flush the decoder after receiving EOS buffer.
  } while (buffer->end_of_stream() && has_produced_frame);

  if (buffer->end_of_stream())
    state_ = kDecodeFinished;

  decode_cb.Run(kOk);
}

bool FFmpegAudioDecoder::FFmpegDecode(
    const scoped_refptr<DecoderBuffer>& buffer,
    bool* has_produced_frame) {
  DCHECK(!*has_produced_frame);

  AVPacket packet;
  av_init_packet(&packet);
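  // An end of stream buffer maps to an empty packet (NULL data, zero size),
  // which tells FFmpeg to start draining any frames it has buffered.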
  if (buffer->end_of_stream()) {
    packet.data = NULL;
    packet.size = 0;
  } else {
    packet.data = const_cast<uint8*>(buffer->data());
    packet.size = buffer->data_size();
  }

  // Each audio packet may contain several frames, so we must call the decoder
  // until we've exhausted the packet. Regardless of the packet size we always
  // want to hand it to the decoder at least once, otherwise we would end up
  // skipping end of stream packets since they have a size of zero.
  do {
    int frame_decoded = 0;
    const int result = avcodec_decode_audio4(
        codec_context_.get(), av_frame_.get(), &frame_decoded, &packet);

    if (result < 0) {
      DCHECK(!buffer->end_of_stream())
          << "End of stream buffer produced an error! "
          << "This is quite possibly a bug in the audio decoder not handling "
          << "end of stream AVPackets correctly.";

      MEDIA_LOG(log_cb_)
          << "Dropping audio frame which failed decode with timestamp: "
          << buffer->timestamp().InMicroseconds() << " us, duration: "
          << buffer->duration().InMicroseconds() << " us, packet size: "
          << buffer->data_size() << " bytes";

      break;
    }

    // Update packet size and data pointer in case we need to call the decoder
    // with the remaining bytes from this packet.
    packet.size -= result;
    packet.data += result;

    scoped_refptr<AudioBuffer> output;
    const int channels = DetermineChannels(av_frame_.get());
    if (frame_decoded) {
      if (av_frame_->sample_rate != config_.samples_per_second() ||
          channels != ChannelLayoutToChannelCount(config_.channel_layout()) ||
          av_frame_->format != av_sample_format_) {
        DLOG(ERROR) << "Unsupported midstream configuration change!"
                    << " Sample Rate: " << av_frame_->sample_rate << " vs "
                    << config_.samples_per_second()
                    << ", Channels: " << channels << " vs "
                    << ChannelLayoutToChannelCount(config_.channel_layout())
                    << ", Sample Format: " << av_frame_->format << " vs "
                    << av_sample_format_;
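
        // With implicitly signalled HE-AAC, the SBR extension only becomes
        // visible once decoding starts, so the output sample rate suddenly
        // doubles relative to the configured rate.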
        if (config_.codec() == kCodecAAC &&
            av_frame_->sample_rate == 2 * config_.samples_per_second()) {
          MEDIA_LOG(log_cb_) << "Implicit HE-AAC signalling is being used."
                             << " Please use mp4a.40.5 instead of mp4a.40.2 in"
                             << " the mimetype.";
        }

        // This is an unrecoverable error, so bail out.
        av_frame_unref(av_frame_.get());
        return false;
      }

      // Get the AudioBuffer that the data was decoded into. Adjust the number
      // of frames, in case fewer than requested were actually decoded.
      output = reinterpret_cast<AudioBuffer*>(
          av_buffer_get_opaque(av_frame_->buf[0]));

      DCHECK_EQ(ChannelLayoutToChannelCount(config_.channel_layout()),
                output->channel_count());
      const int unread_frames = output->frame_count() - av_frame_->nb_samples;
      DCHECK_GE(unread_frames, 0);
      if (unread_frames > 0)
        output->TrimEnd(unread_frames);
      av_frame_unref(av_frame_.get());
    }

    // WARNING: |av_frame_| no longer has valid data at this point.
    const int decoded_frames = frame_decoded ? output->frame_count() : 0;
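    // An end of stream packet that produced no output means the decoder is
    // fully drained. Otherwise let the discard helper strip codec delay and
    // padding samples and assign a timestamp before the buffer is emitted.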
    if (IsEndOfStream(result, decoded_frames, buffer)) {
      DCHECK_EQ(packet.size, 0);
    } else if (discard_helper_->ProcessBuffers(buffer, output)) {
      *has_produced_frame = true;
      output_cb_.Run(output);
    }
  } while (packet.size > 0);

  return true;
}

void FFmpegAudioDecoder::ReleaseFFmpegResources() {
  codec_context_.reset();
  av_frame_.reset();
}

bool FFmpegAudioDecoder::ConfigureDecoder() {
  if (!config_.IsValidConfig()) {
    DLOG(ERROR) << "Invalid audio stream -"
                << " codec: " << config_.codec()
                << " channel layout: " << config_.channel_layout()
                << " bits per channel: " << config_.bits_per_channel()
                << " samples per second: " << config_.samples_per_second();
    return false;
  }

  if (config_.is_encrypted()) {
    DLOG(ERROR) << "Encrypted audio stream not supported";
    return false;
  }

  // Release existing decoder resources if necessary.
  ReleaseFFmpegResources();

  // Initialize AVCodecContext structure.
  codec_context_.reset(avcodec_alloc_context3(NULL));
  AudioDecoderConfigToAVCodecContext(config_, codec_context_.get());
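
  // Have FFmpeg call GetAudioBuffer() for output allocations so decoded
  // samples are written directly into AudioBuffer memory. Reference-counted
  // frames are required for the AVBufferRef created there.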
  codec_context_->opaque = this;
  codec_context_->get_buffer2 = GetAudioBuffer;
  codec_context_->refcounted_frames = 1;

  AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
  if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
    DLOG(ERROR) << "Could not initialize audio decoder: "
                << codec_context_->codec_id;
    ReleaseFFmpegResources();
    state_ = kUninitialized;
    return false;
  }

  av_frame_.reset(av_frame_alloc());
  discard_helper_.reset(new AudioDiscardHelper(config_.samples_per_second(),
                                               config_.codec_delay()));
  av_sample_format_ = codec_context_->sample_fmt;
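
  // FFmpegDecode() treats any mismatch between decoded frames and the
  // configuration as an error, so reject the stream up front if FFmpeg
  // disagrees with the config about the channel count.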
  if (codec_context_->channels !=
      ChannelLayoutToChannelCount(config_.channel_layout())) {
    DLOG(ERROR) << "Audio configuration specified "
                << ChannelLayoutToChannelCount(config_.channel_layout())
                << " channels, but FFmpeg thinks the file contains "
                << codec_context_->channels << " channels";
    ReleaseFFmpegResources();
    state_ = kUninitialized;
    return false;
  }

  ResetTimestampState();
  return true;
}

void FFmpegAudioDecoder::ResetTimestampState() {
  discard_helper_->Reset(config_.codec_delay());
}

}  // namespace media