// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/media_stream_audio_processor.h"

#include "base/command_line.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/trace_event/trace_event.h"
#include "content/public/common/content_switches.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
#include "content/renderer/media/rtc_media_constraints.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_converter.h"
#include "media/base/audio_fifo.h"
#include "media/base/channel_layout.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
#include "third_party/webrtc/modules/audio_processing/typing_detection.h"

#if defined(OS_CHROMEOS)
#include "base/sys_info.h"
#endif

namespace content {

namespace {

using webrtc::AudioProcessing;
using webrtc::NoiseSuppression;

const int kAudioProcessingNumberOfChannels = 1;

AudioProcessing::ChannelLayout MapLayout(media::ChannelLayout media_layout) {
  switch (media_layout) {
    case media::CHANNEL_LAYOUT_MONO:
      return AudioProcessing::kMono;
    case media::CHANNEL_LAYOUT_STEREO:
      return AudioProcessing::kStereo;
    case media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC:
      return AudioProcessing::kStereoAndKeyboard;
    default:
      NOTREACHED() << "Layout not supported: " << media_layout;
      return AudioProcessing::kMono;
  }
}

// This is only used for playout data, where at most two channels are
// supported.
AudioProcessing::ChannelLayout ChannelsToLayout(int num_channels) {
  switch (num_channels) {
    case 1:
      return AudioProcessing::kMono;
    case 2:
      return AudioProcessing::kStereo;
    default:
      NOTREACHED() << "Channels not supported: " << num_channels;
      return AudioProcessing::kMono;
  }
}

// Used by UMA histograms and entries shouldn't be re-ordered or removed.
enum AudioTrackProcessingStates {
  AUDIO_PROCESSING_ENABLED = 0,
  AUDIO_PROCESSING_DISABLED,
  AUDIO_PROCESSING_IN_WEBRTC,
  AUDIO_PROCESSING_MAX
};

void RecordProcessingState(AudioTrackProcessingStates state) {
  UMA_HISTOGRAM_ENUMERATION("Media.AudioTrackProcessingStates",
                            state, AUDIO_PROCESSING_MAX);
}

bool IsDelayAgnosticAecEnabled() {
  // Note: It's important to query the field trial state first, to ensure that
  // UMA reports the correct group.
  const std::string group_name =
      base::FieldTrialList::FindFullName("UseDelayAgnosticAEC");
  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
  if (command_line->HasSwitch(switches::kEnableDelayAgnosticAec))
    return true;
  if (command_line->HasSwitch(switches::kDisableDelayAgnosticAec))
    return false;

  return (group_name == "Enabled" || group_name == "DefaultEnabled");
}

bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) {
  return base::FieldTrialList::FindFullName("ChromebookBeamforming") ==
             "Enabled" ||
         audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming);
}

}  // namespace

// Wraps AudioBus to provide access to the array of channel pointers, since
// this is the type webrtc::AudioProcessing deals in. The array is refreshed
// on every channel_ptrs() call, and will be valid until the underlying
// AudioBus pointers are changed, e.g. through calls to SetChannelData() or
// SwapChannels().
//
// All methods are called on one of the capture or render audio threads
// exclusively.
class MediaStreamAudioBus {
 public:
  MediaStreamAudioBus(int channels, int frames)
      : bus_(media::AudioBus::Create(channels, frames)),
        channel_ptrs_(new float*[channels]) {
    // May be created in the main render thread and used in the audio threads.
    thread_checker_.DetachFromThread();
  }

  media::AudioBus* bus() {
    DCHECK(thread_checker_.CalledOnValidThread());
    return bus_.get();
  }

  float* const* channel_ptrs() {
    DCHECK(thread_checker_.CalledOnValidThread());
    for (int i = 0; i < bus_->channels(); ++i) {
      channel_ptrs_[i] = bus_->channel(i);
    }
    return channel_ptrs_.get();
  }

 private:
  base::ThreadChecker thread_checker_;
  scoped_ptr<media::AudioBus> bus_;
  scoped_ptr<float*[]> channel_ptrs_;
};

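// A minimal usage sketch for MediaStreamAudioBus (illustrative only; not code
// in this file):
//
//   MediaStreamAudioBus bus(2, 480);          // stereo, 10 ms at 48 kHz
//   bus.bus()->Zero();                        // fill via the AudioBus API
//   float* const* ptrs = bus.channel_ptrs();  // pass to e.g. ProcessStream()
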
// Wraps AudioFifo to provide a cleaner interface to MediaStreamAudioProcessor.
// It avoids the FIFO when the source and destination frames match. All methods
// are called on one of the capture or render audio threads exclusively. If
// |source_channels| is larger than |destination_channels|, only the first
// |destination_channels| are kept from the source.
class MediaStreamAudioFifo {
 public:
  MediaStreamAudioFifo(int source_channels,
                       int destination_channels,
                       int source_frames,
                       int destination_frames,
                       int sample_rate)
      : source_channels_(source_channels),
        source_frames_(source_frames),
        sample_rate_(sample_rate),
        destination_(
            new MediaStreamAudioBus(destination_channels, destination_frames)),
        data_available_(false) {
    DCHECK_GE(source_channels, destination_channels);
    DCHECK_GT(sample_rate_, 0);

    if (source_channels > destination_channels) {
      audio_source_intermediate_ =
          media::AudioBus::CreateWrapper(destination_channels);
    }

    if (source_frames != destination_frames) {
      // Since we require every Push to be followed by as many Consumes as
      // possible, twice the larger of the two is a (probably) loose upper
      // bound on the FIFO size.
      const int fifo_frames = 2 * std::max(source_frames, destination_frames);
      fifo_.reset(new media::AudioFifo(destination_channels, fifo_frames));
    }

    // May be created in the main render thread and used in the audio threads.
    thread_checker_.DetachFromThread();
  }

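  // Delay bookkeeping note: a sample pushed now is consumed only after
  // everything already buffered ahead of it, so its delay estimate grows by
  // fifo_->frames() / sample_rate_ seconds at Push() time and shrinks by one
  // destination buffer's duration for each successful Consume().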
  void Push(const media::AudioBus& source, base::TimeDelta audio_delay) {
    DCHECK(thread_checker_.CalledOnValidThread());
    DCHECK_EQ(source.channels(), source_channels_);
    DCHECK_EQ(source.frames(), source_frames_);

    const media::AudioBus* source_to_push = &source;

    if (audio_source_intermediate_) {
      for (int i = 0; i < destination_->bus()->channels(); ++i) {
        audio_source_intermediate_->SetChannelData(
            i,
            const_cast<float*>(source.channel(i)));
      }
      audio_source_intermediate_->set_frames(source.frames());
      source_to_push = audio_source_intermediate_.get();
    }

    if (fifo_) {
      next_audio_delay_ = audio_delay +
          fifo_->frames() * base::TimeDelta::FromSeconds(1) / sample_rate_;
      fifo_->Push(source_to_push);
    } else {
      source_to_push->CopyTo(destination_->bus());
      next_audio_delay_ = audio_delay;
      data_available_ = true;
    }
  }

  // Returns true if there are destination_frames() of data available to be
  // consumed, and otherwise false.
  bool Consume(MediaStreamAudioBus** destination,
               base::TimeDelta* audio_delay) {
    DCHECK(thread_checker_.CalledOnValidThread());

    if (fifo_) {
      if (fifo_->frames() < destination_->bus()->frames())
        return false;

      fifo_->Consume(destination_->bus(), 0, destination_->bus()->frames());
      *audio_delay = next_audio_delay_;
      next_audio_delay_ -=
          destination_->bus()->frames() * base::TimeDelta::FromSeconds(1) /
              sample_rate_;
    } else {
      if (!data_available_)
        return false;
      *audio_delay = next_audio_delay_;
      // The data was already copied to |destination_| in this case.
      data_available_ = false;
    }

    *destination = destination_.get();
    return true;
  }

 private:
  base::ThreadChecker thread_checker_;
  const int source_channels_;  // For a DCHECK.
  const int source_frames_;  // For a DCHECK.
  const int sample_rate_;
  scoped_ptr<media::AudioBus> audio_source_intermediate_;
  scoped_ptr<MediaStreamAudioBus> destination_;
  scoped_ptr<media::AudioFifo> fifo_;

  // When using |fifo_|, this is the audio delay of the first sample to be
  // consumed next from the FIFO. When not using |fifo_|, this is the audio
  // delay of the first sample in |destination_|.
  base::TimeDelta next_audio_delay_;

  // True when |destination_| contains the data to be returned by the next call
  // to Consume(). Only used when the FIFO is disabled.
  bool data_available_;
};

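// Worked example (illustrative): a source delivering 128-frame buffers at
// 48 kHz into a 480-frame (10 ms) destination gets a FIFO of
// 2 * max(128, 480) = 960 frames, and four Pushes are needed before the first
// Consume() succeeds. When source and destination frame counts match, the
// FIFO is bypassed and Push() copies straight into |destination_|.
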
MediaStreamAudioProcessor::MediaStreamAudioProcessor(
    const blink::WebMediaConstraints& constraints,
    int effects,
    WebRtcPlayoutDataSource* playout_data_source)
    : render_delay_ms_(0),
      playout_data_source_(playout_data_source),
      audio_mirroring_(false),
      typing_detected_(false),
      stopped_(false) {
  capture_thread_checker_.DetachFromThread();
  render_thread_checker_.DetachFromThread();
  InitializeAudioProcessingModule(constraints, effects);

  aec_dump_message_filter_ = AecDumpMessageFilter::Get();
  // In unit tests not creating a message filter, |aec_dump_message_filter_|
  // will be NULL. We can just ignore that. Other unit tests and browser tests
  // ensure that we do get the filter when we should.
  if (aec_dump_message_filter_.get())
    aec_dump_message_filter_->AddDelegate(this);
}

MediaStreamAudioProcessor::~MediaStreamAudioProcessor() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  Stop();
}

void MediaStreamAudioProcessor::OnCaptureFormatChanged(
    const media::AudioParameters& input_format) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  // There is no need to hold a lock here since the caller guarantees that
  // there are no more PushCaptureData() and ProcessAndConsumeData() callbacks
  // on the capture thread.
  InitializeCaptureFifo(input_format);

  // Reset the |capture_thread_checker_| since the capture data will come from
  // a new capture thread.
  capture_thread_checker_.DetachFromThread();
}

void MediaStreamAudioProcessor::PushCaptureData(
    const media::AudioBus& audio_source,
    base::TimeDelta capture_delay) {
  DCHECK(capture_thread_checker_.CalledOnValidThread());

  capture_fifo_->Push(audio_source, capture_delay);
}

bool MediaStreamAudioProcessor::ProcessAndConsumeData(
    int volume,
    bool key_pressed,
    media::AudioBus** processed_data,
    base::TimeDelta* capture_delay,
    int* new_volume) {
  DCHECK(capture_thread_checker_.CalledOnValidThread());
  DCHECK(processed_data);
  DCHECK(capture_delay);
  DCHECK(new_volume);

  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessAndConsumeData");

  MediaStreamAudioBus* process_bus;
  if (!capture_fifo_->Consume(&process_bus, capture_delay))
    return false;

  // Use the process bus directly if audio processing is disabled.
  MediaStreamAudioBus* output_bus = process_bus;
  *new_volume = 0;
  if (audio_processing_) {
    output_bus = output_bus_.get();
    *new_volume = ProcessData(process_bus->channel_ptrs(),
                              process_bus->bus()->frames(), *capture_delay,
                              volume, key_pressed, output_bus->channel_ptrs());
  }

  // Swap channels before interleaving the data.
  if (audio_mirroring_ &&
      output_format_.channel_layout() == media::CHANNEL_LAYOUT_STEREO) {
    // Swap the first and second channels.
    output_bus->bus()->SwapChannels(0, 1);
  }

  *processed_data = output_bus->bus();
  return true;
}

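// Expected call pattern on the capture thread (a reading of the FIFO contract
// above, not an API guarantee): each PushCaptureData() is followed by calls to
// ProcessAndConsumeData() until it returns false, so the capture FIFO never
// accumulates more than one source buffer of unconsumed data.
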
void MediaStreamAudioProcessor::Stop() {
  DCHECK(main_thread_checker_.CalledOnValidThread());

  if (stopped_)
    return;
  stopped_ = true;

  if (aec_dump_message_filter_.get()) {
    aec_dump_message_filter_->RemoveDelegate(this);
    aec_dump_message_filter_ = NULL;
  }

  if (!audio_processing_.get())
    return;

  StopEchoCancellationDump(audio_processing_.get());

  if (playout_data_source_) {
    playout_data_source_->RemovePlayoutSink(this);
    playout_data_source_ = NULL;
  }
}

const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const {
  return input_format_;
}

const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const {
  return output_format_;
}

void MediaStreamAudioProcessor::OnAecDumpFile(
    const IPC::PlatformFileForTransit& file_handle) {
  DCHECK(main_thread_checker_.CalledOnValidThread());

  base::File file = IPC::PlatformFileForTransitToFile(file_handle);
  DCHECK(file.IsValid());

  if (audio_processing_)
    StartEchoCancellationDump(audio_processing_.get(), file.Pass());
  else
    file.Close();
}

void MediaStreamAudioProcessor::OnDisableAecDump() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  if (audio_processing_)
    StopEchoCancellationDump(audio_processing_.get());
}

void MediaStreamAudioProcessor::OnIpcClosing() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  aec_dump_message_filter_ = NULL;
}

void MediaStreamAudioProcessor::OnPlayoutData(media::AudioBus* audio_bus,
                                              int sample_rate,
                                              int audio_delay_milliseconds) {
  DCHECK(render_thread_checker_.CalledOnValidThread());
  DCHECK(audio_processing_->echo_control_mobile()->is_enabled() ^
         audio_processing_->echo_cancellation()->is_enabled());

  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::OnPlayoutData");
  DCHECK_LT(audio_delay_milliseconds,
            std::numeric_limits<base::subtle::Atomic32>::max());
  base::subtle::Release_Store(&render_delay_ms_, audio_delay_milliseconds);

  InitializeRenderFifoIfNeeded(sample_rate, audio_bus->channels(),
                               audio_bus->frames());

  render_fifo_->Push(
      *audio_bus, base::TimeDelta::FromMilliseconds(audio_delay_milliseconds));
  MediaStreamAudioBus* analysis_bus;
  base::TimeDelta audio_delay;
  while (render_fifo_->Consume(&analysis_bus, &audio_delay)) {
    // TODO(ajm): Should AnalyzeReverseStream() account for the |audio_delay|?
    audio_processing_->AnalyzeReverseStream(
        analysis_bus->channel_ptrs(),
        analysis_bus->bus()->frames(),
        sample_rate,
        ChannelsToLayout(audio_bus->channels()));
  }
}

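// Note: the render FIFO exists because AnalyzeReverseStream(), like
// ProcessStream(), consumes 10 ms chunks (see |analysis_frames| in
// InitializeRenderFifoIfNeeded()), while the OS render callback may deliver
// buffers of arbitrary size; the FIFO re-chunks between the two.
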
void MediaStreamAudioProcessor::OnPlayoutDataSourceChanged() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  // There is no need to hold a lock here since the caller guarantees that
  // there is no more OnPlayoutData() callback on the render thread.
  render_thread_checker_.DetachFromThread();
  render_fifo_.reset();
}

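// |typing_detected_| is written with Release_Store on the capture thread (see
// ProcessData()) and read here with Acquire_Load, so GetStats() can be called
// from another thread without holding a lock.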
void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) {
  stats->typing_noise_detected =
      (base::subtle::Acquire_Load(&typing_detected_) != false);
  GetAecStats(audio_processing_.get()->echo_cancellation(), stats);
}

void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
    const blink::WebMediaConstraints& constraints, int effects) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK(!audio_processing_);

  MediaAudioConstraints audio_constraints(constraints, effects);

  // Audio mirroring can be enabled even though audio processing is otherwise
  // disabled.
  audio_mirroring_ = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogAudioMirroring);

#if defined(OS_IOS)
  // On iOS, VPIO provides built-in AGC and AEC.
  const bool echo_cancellation = false;
  const bool goog_agc = false;
#else
  const bool echo_cancellation =
      audio_constraints.GetEchoCancellationProperty();
  const bool goog_agc = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogAutoGainControl);
#endif

#if defined(OS_IOS) || defined(OS_ANDROID)
  const bool goog_experimental_aec = false;
  const bool goog_typing_detection = false;
#else
  const bool goog_experimental_aec = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogExperimentalEchoCancellation);
  const bool goog_typing_detection = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogTypingNoiseDetection);
#endif

  const bool goog_ns = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogNoiseSuppression);
  const bool goog_experimental_ns = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogExperimentalNoiseSuppression);
  const bool goog_beamforming = IsBeamformingEnabled(audio_constraints);
  const bool goog_high_pass_filter = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogHighpassFilter);

  // Return immediately if no goog constraint is enabled.
  if (!echo_cancellation && !goog_experimental_aec && !goog_ns &&
      !goog_high_pass_filter && !goog_typing_detection &&
      !goog_agc && !goog_experimental_ns && !goog_beamforming) {
    RecordProcessingState(AUDIO_PROCESSING_DISABLED);
    return;
  }

  // Experimental options provided at creation.
  webrtc::Config config;
  if (goog_experimental_aec)
    config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true));
  if (goog_experimental_ns)
    config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true));
  if (IsDelayAgnosticAecEnabled())
    config.Set<webrtc::ReportedDelay>(new webrtc::ReportedDelay(false));
  if (goog_beamforming) {
    ConfigureBeamforming(&config, audio_constraints.GetPropertyAsString(
        MediaAudioConstraints::kGoogArrayGeometry));
  }

  // Create and configure the webrtc::AudioProcessing.
  audio_processing_.reset(webrtc::AudioProcessing::Create(config));

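  // The webrtc::Config options above (ExtendedFilter, ExperimentalNs,
  // ReportedDelay, Beamforming) only take effect when supplied at Create()
  // time; the component toggles below are applied to the live instance.
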
  // Enable the audio processing components.
  if (echo_cancellation) {
    EnableEchoCancellation(audio_processing_.get());

    if (playout_data_source_)
      playout_data_source_->AddPlayoutSink(this);

    // Prepare for logging echo information. If there is data remaining in
    // |echo_information_| we simply discard it.
    echo_information_.reset(new EchoInformation());
  }

  if (goog_ns) {
    // The beamforming postfilter is effective at suppressing stationary noise,
    // so reduce the single-channel NS aggressiveness when enabled.
    const NoiseSuppression::Level ns_level =
        config.Get<webrtc::Beamforming>().enabled ? NoiseSuppression::kLow
                                                  : NoiseSuppression::kHigh;
    EnableNoiseSuppression(audio_processing_.get(), ns_level);
  }

  if (goog_high_pass_filter)
    EnableHighPassFilter(audio_processing_.get());

  if (goog_typing_detection) {
    // TODO(xians): Remove this |typing_detector_| after the typing suppression
    // is enabled by default.
    typing_detector_.reset(new webrtc::TypingDetection());
    EnableTypingDetection(audio_processing_.get(), typing_detector_.get());
  }

  if (goog_agc)
    EnableAutomaticGainControl(audio_processing_.get());

  RecordProcessingState(AUDIO_PROCESSING_ENABLED);
}

void MediaStreamAudioProcessor::ConfigureBeamforming(
    webrtc::Config* config,
    const std::string& geometry_str) const {
  std::vector<webrtc::Point> geometry = ParseArrayGeometry(geometry_str);
#if defined(OS_CHROMEOS)
  if (geometry.size() == 0) {
    const std::string board = base::SysInfo::GetLsbReleaseBoard();
    if (board.find("peach_pi") != std::string::npos) {
      geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f));
      geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f));
    } else if (board.find("swanky") != std::string::npos) {
      geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f));
      geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f));
    } else if (board.find("samus") != std::string::npos) {
      geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f));
      geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f));
    }
  }
#endif
  config->Set<webrtc::Beamforming>(new webrtc::Beamforming(geometry.size() > 1,
                                                           geometry));
}

std::vector<webrtc::Point> MediaStreamAudioProcessor::ParseArrayGeometry(
    const std::string& geometry_str) const {
  std::vector<webrtc::Point> result;
  std::vector<float> values;
  std::istringstream str(geometry_str);
  std::copy(std::istream_iterator<float>(str),
            std::istream_iterator<float>(),
            std::back_inserter(values));
  if (values.size() % 3 == 0) {
    for (size_t i = 0; i < values.size(); i += 3) {
      result.push_back(webrtc::Point(values[i + 0],
                                     values[i + 1],
                                     values[i + 2]));
    }
  }
  return result;
}

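// For example, "-0.025 0 0 0.025 0 0" parses to two points 5 cm apart on the
// x-axis; any token count not divisible by three yields an empty geometry.
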
void MediaStreamAudioProcessor::InitializeCaptureFifo(
    const media::AudioParameters& input_format) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK(input_format.IsValid());
  input_format_ = input_format;

  // TODO(ajm): For now, we assume fixed parameters for the output when audio
  // processing is enabled, to match the previous behavior. We should either
  // use the input parameters (in which case, audio processing will convert
  // at output) or ideally, have a backchannel from the sink to know what
  // format it would prefer.
#if defined(OS_ANDROID)
  int audio_processing_sample_rate = AudioProcessing::kSampleRate16kHz;
#else
  int audio_processing_sample_rate = AudioProcessing::kSampleRate48kHz;
#endif
  const int output_sample_rate = audio_processing_ ?
                                 audio_processing_sample_rate :
                                 input_format.sample_rate();
  media::ChannelLayout output_channel_layout = audio_processing_ ?
      media::GuessChannelLayout(kAudioProcessingNumberOfChannels) :
      input_format.channel_layout();

  // The number of output channels from the FIFO is normally the same as the
  // input.
  int fifo_output_channels = input_format.channels();

  // Special case for when we have a keyboard mic channel on the input and no
  // audio processing is used. We will then have the FIFO strip away that
  // channel. So we use stereo as output layout, and also change the output
  // channels for the FIFO.
  if (input_format.channel_layout() ==
          media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC &&
      !audio_processing_) {
    output_channel_layout = media::CHANNEL_LAYOUT_STEREO;
    fifo_output_channels = ChannelLayoutToChannelCount(output_channel_layout);
  }

  // webrtc::AudioProcessing requires a 10 ms chunk size. We use this native
  // size when processing is enabled. When disabled we use the same size as
  // the source if less than 10 ms.

  // TODO(ajm): This conditional buffer size appears to be assuming knowledge
  // of the sink based on the source parameters. PeerConnection sinks seem to
  // want 10 ms chunks regardless, while WebAudio sinks want less, and we're
  // assuming we can identify WebAudio sinks by the input chunk size. Less
  // fragile would be to have the sink actually tell us how much it wants (as
  // in the above TODO).
  int processing_frames = input_format.sample_rate() / 100;
  int output_frames = output_sample_rate / 100;
  if (!audio_processing_ && input_format.frames_per_buffer() < output_frames) {
    processing_frames = input_format.frames_per_buffer();
    output_frames = processing_frames;
  }

  output_format_ = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      output_channel_layout,
      output_sample_rate,
      16,  // Bits per sample.
      output_frames);

  capture_fifo_.reset(
      new MediaStreamAudioFifo(input_format.channels(),
                               fifo_output_channels,
                               input_format.frames_per_buffer(),
                               processing_frames,
                               input_format.sample_rate()));

  if (audio_processing_) {
    output_bus_.reset(new MediaStreamAudioBus(output_format_.channels(),
                                              output_frames));
  }
}

void MediaStreamAudioProcessor::InitializeRenderFifoIfNeeded(
    int sample_rate, int number_of_channels, int frames_per_buffer) {
  DCHECK(render_thread_checker_.CalledOnValidThread());
  if (render_fifo_.get() &&
      render_format_.sample_rate() == sample_rate &&
      render_format_.channels() == number_of_channels &&
      render_format_.frames_per_buffer() == frames_per_buffer) {
    // Do nothing if the |render_fifo_| has been set up properly.
    return;
  }

  render_format_ = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::GuessChannelLayout(number_of_channels),
      sample_rate,
      16,  // Bits per sample.
      frames_per_buffer);

  const int analysis_frames = sample_rate / 100;  // 10 ms chunks.
  render_fifo_.reset(
      new MediaStreamAudioFifo(number_of_channels,
                               number_of_channels,
                               frames_per_buffer,
                               analysis_frames,
                               sample_rate));
}

int MediaStreamAudioProcessor::ProcessData(const float* const* process_ptrs,
                                           int process_frames,
                                           base::TimeDelta capture_delay,
                                           int volume,
                                           bool key_pressed,
                                           float* const* output_ptrs) {
  DCHECK(audio_processing_);
  DCHECK(capture_thread_checker_.CalledOnValidThread());

  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessData");

  base::subtle::Atomic32 render_delay_ms =
      base::subtle::Acquire_Load(&render_delay_ms_);
  int64 capture_delay_ms = capture_delay.InMilliseconds();
  DCHECK_LT(capture_delay_ms,
            std::numeric_limits<base::subtle::Atomic32>::max());
  int total_delay_ms = capture_delay_ms + render_delay_ms;
  if (total_delay_ms > 300) {
    LOG(WARNING) << "Large audio delay, capture delay: " << capture_delay_ms
                 << "ms; render delay: " << render_delay_ms << "ms";
  }

  webrtc::AudioProcessing* ap = audio_processing_.get();
  ap->set_stream_delay_ms(total_delay_ms);

  DCHECK_LE(volume, WebRtcAudioDeviceImpl::kMaxVolumeLevel);
  webrtc::GainControl* agc = ap->gain_control();
  int err = agc->set_stream_analog_level(volume);
  DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err;

  ap->set_stream_key_pressed(key_pressed);

  err = ap->ProcessStream(process_ptrs,
                          process_frames,
                          input_format_.sample_rate(),
                          MapLayout(input_format_.channel_layout()),
                          output_format_.sample_rate(),
                          MapLayout(output_format_.channel_layout()),
                          output_ptrs);
  DCHECK_EQ(err, 0) << "ProcessStream() error: " << err;

  if (typing_detector_) {
    webrtc::VoiceDetection* vad = ap->voice_detection();
    DCHECK(vad->is_enabled());
    bool detected = typing_detector_->Process(key_pressed,
                                              vad->stream_has_voice());
    base::subtle::Release_Store(&typing_detected_, detected);
  }

  if (echo_information_) {
    echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation());
  }

  // Return 0 if the volume hasn't been changed, and otherwise the new volume.
  return (agc->stream_analog_level() == volume) ?
      0 : agc->stream_analog_level();
}

}  // namespace content