// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/media_stream_audio_processor.h"

#include "base/command_line.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/trace_event/trace_event.h"
#include "content/public/common/content_switches.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
#include "content/renderer/media/rtc_media_constraints.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_converter.h"
#include "media/base/audio_fifo.h"
#include "media/base/channel_layout.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
#include "third_party/webrtc/modules/audio_processing/typing_detection.h"
namespace content {

namespace {

using webrtc::AudioProcessing;
using webrtc::NoiseSuppression;

const int kAudioProcessingNumberOfChannels = 1;

AudioProcessing::ChannelLayout MapLayout(media::ChannelLayout media_layout) {
  switch (media_layout) {
    case media::CHANNEL_LAYOUT_MONO:
      return AudioProcessing::kMono;
    case media::CHANNEL_LAYOUT_STEREO:
      return AudioProcessing::kStereo;
    case media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC:
      return AudioProcessing::kStereoAndKeyboard;
    default:
      NOTREACHED() << "Layout not supported: " << media_layout;
      return AudioProcessing::kMono;
  }
}

// This is only used for playout data, where at most two channels are
// supported.
AudioProcessing::ChannelLayout ChannelsToLayout(int num_channels) {
  switch (num_channels) {
    case 1:
      return AudioProcessing::kMono;
    case 2:
      return AudioProcessing::kStereo;
    default:
      NOTREACHED() << "Channels not supported: " << num_channels;
      return AudioProcessing::kMono;
  }
}

// Used by UMA histograms; entries shouldn't be re-ordered or removed.
enum AudioTrackProcessingStates {
  AUDIO_PROCESSING_ENABLED = 0,
  AUDIO_PROCESSING_DISABLED,
  AUDIO_PROCESSING_IN_WEBRTC,
  AUDIO_PROCESSING_MAX
};

void RecordProcessingState(AudioTrackProcessingStates state) {
  UMA_HISTOGRAM_ENUMERATION("Media.AudioTrackProcessingStates",
                            state, AUDIO_PROCESSING_MAX);
}

bool IsDelayAgnosticAecEnabled() {
  // Note: It's important to query the field trial state first, to ensure that
  // UMA reports the correct group.
  const std::string group_name =
      base::FieldTrialList::FindFullName("UseDelayAgnosticAEC");
  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
  if (command_line->HasSwitch(switches::kEnableDelayAgnosticAec))
    return true;
  if (command_line->HasSwitch(switches::kDisableDelayAgnosticAec))
    return false;

  return (group_name == "Enabled" || group_name == "DefaultEnabled");
}
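
// Note that the two command-line switches above take precedence over the
// field trial: for example, kEnableDelayAgnosticAec forces the feature on
// regardless of which trial group was reported.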

bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) {
  return base::FieldTrialList::FindFullName("ChromebookBeamforming") ==
             "Enabled" ||
         audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming);
}

}  // namespace

// Wraps AudioBus to provide access to the array of channel pointers, since
// this is the type webrtc::AudioProcessing deals in. The array is refreshed
// on every channel_ptrs() call, and will be valid until the underlying
// AudioBus pointers are changed, e.g. through calls to SetChannelData() or
// SwapChannels().
//
// All methods are called on one of the capture or render audio threads
// exclusively.
class MediaStreamAudioBus {
 public:
  MediaStreamAudioBus(int channels, int frames)
      : bus_(media::AudioBus::Create(channels, frames)),
        channel_ptrs_(new float*[channels]) {
    // May be created in the main render thread and used in the audio threads.
    thread_checker_.DetachFromThread();
  }

  media::AudioBus* bus() {
    DCHECK(thread_checker_.CalledOnValidThread());
    return bus_.get();
  }

  float* const* channel_ptrs() {
    DCHECK(thread_checker_.CalledOnValidThread());
    for (int i = 0; i < bus_->channels(); ++i) {
      channel_ptrs_[i] = bus_->channel(i);
    }
    return channel_ptrs_.get();
  }

 private:
  base::ThreadChecker thread_checker_;
  scoped_ptr<media::AudioBus> bus_;
  scoped_ptr<float*[]> channel_ptrs_;
};

// Wraps AudioFifo to provide a cleaner interface to MediaStreamAudioProcessor.
// It avoids the FIFO when the source and destination frames match. All methods
// are called on one of the capture or render audio threads exclusively. If
// |source_channels| is larger than |destination_channels|, only the first
// |destination_channels| are kept from the source.
class MediaStreamAudioFifo {
 public:
  MediaStreamAudioFifo(int source_channels,
                       int destination_channels,
                       int source_frames,
                       int destination_frames,
                       int sample_rate)
      : source_channels_(source_channels),
        source_frames_(source_frames),
        sample_rate_(sample_rate),
        destination_(
            new MediaStreamAudioBus(destination_channels, destination_frames)),
        data_available_(false) {
    DCHECK_GE(source_channels, destination_channels);
    DCHECK_GT(sample_rate_, 0);

    if (source_channels > destination_channels) {
      audio_source_intermediate_ =
          media::AudioBus::CreateWrapper(destination_channels);
    }

    if (source_frames != destination_frames) {
      // Since we require every Push to be followed by as many Consumes as
      // possible, twice the larger of the two is a (probably) loose upper
      // bound on the number of frames the FIFO ever needs to hold.
      const int fifo_frames = 2 * std::max(source_frames, destination_frames);
      fifo_.reset(new media::AudioFifo(destination_channels, fifo_frames));
    }

    // May be created in the main render thread and used in the audio threads.
    thread_checker_.DetachFromThread();
  }
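
  // Example of the sizing above: a 44.1 kHz source pushing 441-frame (10 ms)
  // buffers into a 480-frame destination allocates a FIFO of
  // 2 * max(441, 480) = 960 frames.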

  void Push(const media::AudioBus& source, base::TimeDelta audio_delay) {
    DCHECK(thread_checker_.CalledOnValidThread());
    DCHECK_EQ(source.channels(), source_channels_);
    DCHECK_EQ(source.frames(), source_frames_);

    const media::AudioBus* source_to_push = &source;

    if (audio_source_intermediate_) {
      for (int i = 0; i < destination_->bus()->channels(); ++i) {
        audio_source_intermediate_->SetChannelData(
            i, const_cast<float*>(source.channel(i)));
      }
      audio_source_intermediate_->set_frames(source.frames());
      source_to_push = audio_source_intermediate_.get();
    }

    if (fifo_) {
      CHECK_LT(fifo_->frames(), destination_->bus()->frames());
      next_audio_delay_ =
          audio_delay +
          fifo_->frames() * base::TimeDelta::FromSeconds(1) / sample_rate_;
      fifo_->Push(source_to_push);
    } else {
      CHECK(!data_available_);
      source_to_push->CopyTo(destination_->bus());
      next_audio_delay_ = audio_delay;
      data_available_ = true;
    }
  }
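
  // Example of the delay bookkeeping in Push() above: if 160 frames are
  // already queued at 16 kHz, the frame at the head of the FIFO was captured
  // 160 / 16000 s = 10 ms before the buffer being pushed, so 10 ms is added
  // to |audio_delay| when recording the delay of the next chunk to be
  // consumed.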

  // Returns true if there are destination_frames() of data available to be
  // consumed, and otherwise false.
  bool Consume(MediaStreamAudioBus** destination,
               base::TimeDelta* audio_delay) {
    DCHECK(thread_checker_.CalledOnValidThread());

    if (fifo_) {
      if (fifo_->frames() < destination_->bus()->frames())
        return false;

      fifo_->Consume(destination_->bus(), 0, destination_->bus()->frames());
      *audio_delay = next_audio_delay_;
      next_audio_delay_ -=
          destination_->bus()->frames() * base::TimeDelta::FromSeconds(1) /
          sample_rate_;
    } else {
      if (!data_available_)
        return false;
      *audio_delay = next_audio_delay_;
      // The data was already copied to |destination_| in this case.
      data_available_ = false;
    }

    *destination = destination_.get();
    return true;
  }

 private:
  base::ThreadChecker thread_checker_;
  const int source_channels_;  // For a DCHECK.
  const int source_frames_;  // For a DCHECK.
  const int sample_rate_;
  scoped_ptr<media::AudioBus> audio_source_intermediate_;
  scoped_ptr<MediaStreamAudioBus> destination_;
  scoped_ptr<media::AudioFifo> fifo_;

  // When using |fifo_|, this is the audio delay of the first sample to be
  // consumed next from the FIFO. When not using |fifo_|, this is the audio
  // delay of the first sample in |destination_|.
  base::TimeDelta next_audio_delay_;

  // True when |destination_| contains the data to be returned by the next
  // call to Consume(). Only used when the FIFO is disabled.
  bool data_available_;
};
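
// Example of the decrement in Consume() above: after a 480-frame (10 ms at
// 48 kHz) chunk is consumed, the chunk now at the head of the FIFO was
// captured 10 ms later, so its reported delay is 10 ms smaller.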

MediaStreamAudioProcessor::MediaStreamAudioProcessor(
    const blink::WebMediaConstraints& constraints,
    const MediaStreamDevice::AudioDeviceParameters& input_params,
    WebRtcPlayoutDataSource* playout_data_source)
    : render_delay_ms_(0),
      playout_data_source_(playout_data_source),
      audio_mirroring_(false),
      typing_detected_(false),
      stopped_(false) {
  capture_thread_checker_.DetachFromThread();
  render_thread_checker_.DetachFromThread();
  InitializeAudioProcessingModule(constraints, input_params);

  aec_dump_message_filter_ = AecDumpMessageFilter::Get();
  // In unit tests that do not create a message filter,
  // |aec_dump_message_filter_| will be NULL. We can just ignore that. Other
  // unit tests and browser tests ensure that we do get the filter when we
  // should.
  if (aec_dump_message_filter_.get())
    aec_dump_message_filter_->AddDelegate(this);
}

MediaStreamAudioProcessor::~MediaStreamAudioProcessor() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  Stop();
}

void MediaStreamAudioProcessor::OnCaptureFormatChanged(
    const media::AudioParameters& input_format) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  // There is no need to hold a lock here since the caller guarantees that
  // there are no more PushCaptureData() and ProcessAndConsumeData() callbacks
  // on the capture thread.
  InitializeCaptureFifo(input_format);

  // Reset the |capture_thread_checker_| since the capture data will come from
  // a new capture thread.
  capture_thread_checker_.DetachFromThread();
}

void MediaStreamAudioProcessor::PushCaptureData(
    const media::AudioBus& audio_source,
    base::TimeDelta capture_delay) {
  DCHECK(capture_thread_checker_.CalledOnValidThread());

  capture_fifo_->Push(audio_source, capture_delay);
}

bool MediaStreamAudioProcessor::ProcessAndConsumeData(
    int volume,
    bool key_pressed,
    media::AudioBus** processed_data,
    base::TimeDelta* capture_delay,
    int* new_volume) {
  DCHECK(capture_thread_checker_.CalledOnValidThread());
  DCHECK(processed_data);
  DCHECK(capture_delay);
  DCHECK(new_volume);

  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessAndConsumeData");

  MediaStreamAudioBus* process_bus;
  if (!capture_fifo_->Consume(&process_bus, capture_delay))
    return false;

  // Use the process bus directly if audio processing is disabled.
  MediaStreamAudioBus* output_bus = process_bus;
  *new_volume = 0;
  if (audio_processing_) {
    output_bus = output_bus_.get();
    *new_volume = ProcessData(process_bus->channel_ptrs(),
                              process_bus->bus()->frames(), *capture_delay,
                              volume, key_pressed, output_bus->channel_ptrs());
  }

  // Swap channels before interleaving the data.
  if (audio_mirroring_ &&
      output_format_.channel_layout() == media::CHANNEL_LAYOUT_STEREO) {
    // Swap the first and second channels.
    output_bus->bus()->SwapChannels(0, 1);
  }

  *processed_data = output_bus->bus();

  return true;
}

void MediaStreamAudioProcessor::Stop() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  if (stopped_)
    return;
  stopped_ = true;

  if (aec_dump_message_filter_.get()) {
    aec_dump_message_filter_->RemoveDelegate(this);
    aec_dump_message_filter_ = NULL;
  }

  if (!audio_processing_.get())
    return;

  audio_processing_.get()->UpdateHistogramsOnCallEnd();
  StopEchoCancellationDump(audio_processing_.get());

  if (playout_data_source_) {
    playout_data_source_->RemovePlayoutSink(this);
    playout_data_source_ = NULL;
  }
}

const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const {
  return input_format_;
}

const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const {
  return output_format_;
}

void MediaStreamAudioProcessor::OnAecDumpFile(
    const IPC::PlatformFileForTransit& file_handle) {
  DCHECK(main_thread_checker_.CalledOnValidThread());

  base::File file = IPC::PlatformFileForTransitToFile(file_handle);
  DCHECK(file.IsValid());

  if (audio_processing_)
    StartEchoCancellationDump(audio_processing_.get(), file.Pass());
  else
    file.Close();
}

void MediaStreamAudioProcessor::OnDisableAecDump() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  if (audio_processing_)
    StopEchoCancellationDump(audio_processing_.get());
}

void MediaStreamAudioProcessor::OnIpcClosing() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  aec_dump_message_filter_ = NULL;
}

void MediaStreamAudioProcessor::OnPlayoutData(media::AudioBus* audio_bus,
                                              int sample_rate,
                                              int audio_delay_milliseconds) {
  DCHECK(render_thread_checker_.CalledOnValidThread());
  DCHECK(audio_processing_->echo_control_mobile()->is_enabled() ^
         audio_processing_->echo_cancellation()->is_enabled());

  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::OnPlayoutData");
  DCHECK_LT(audio_delay_milliseconds,
            std::numeric_limits<base::subtle::Atomic32>::max());
  base::subtle::Release_Store(&render_delay_ms_, audio_delay_milliseconds);

  InitializeRenderFifoIfNeeded(sample_rate, audio_bus->channels(),
                               audio_bus->frames());

  render_fifo_->Push(
      *audio_bus, base::TimeDelta::FromMilliseconds(audio_delay_milliseconds));
  MediaStreamAudioBus* analysis_bus;
  base::TimeDelta audio_delay;
  while (render_fifo_->Consume(&analysis_bus, &audio_delay)) {
    // TODO(ajm): Should AnalyzeReverseStream() account for the |audio_delay|?
    audio_processing_->AnalyzeReverseStream(
        analysis_bus->channel_ptrs(),
        analysis_bus->bus()->frames(),
        sample_rate,
        ChannelsToLayout(audio_bus->channels()));
  }
}
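
// Note on OnPlayoutData() above: the playout (render) audio pushed here is
// the far-end reference the echo canceller compares captured audio against;
// the FIFO repackages whatever buffer size the playout path delivers into the
// 10 ms chunks that AnalyzeReverseStream() expects.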

void MediaStreamAudioProcessor::OnPlayoutDataSourceChanged() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  // There is no need to hold a lock here since the caller guarantees that
  // there is no more OnPlayoutData() callback on the render thread.
  render_thread_checker_.DetachFromThread();
  render_fifo_.reset();
}

void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) {
  stats->typing_noise_detected =
      (base::subtle::Acquire_Load(&typing_detected_) != false);
  GetAecStats(audio_processing_.get()->echo_cancellation(), stats);
}

void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
    const blink::WebMediaConstraints& constraints,
    const MediaStreamDevice::AudioDeviceParameters& input_params) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK(!audio_processing_);

  MediaAudioConstraints audio_constraints(constraints, input_params.effects);

  // Audio mirroring can be enabled even though audio processing is otherwise
  // disabled.
  audio_mirroring_ = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogAudioMirroring);

#if defined(OS_IOS)
  // On iOS, VPIO provides built-in AGC and AEC.
  const bool echo_cancellation = false;
  const bool goog_agc = false;
#else
  const bool echo_cancellation =
      audio_constraints.GetEchoCancellationProperty();
  const bool goog_agc = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogAutoGainControl);
#endif

#if defined(OS_IOS) || defined(OS_ANDROID)
  const bool goog_experimental_aec = false;
  const bool goog_typing_detection = false;
#else
  const bool goog_experimental_aec = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogExperimentalEchoCancellation);
  const bool goog_typing_detection = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogTypingNoiseDetection);
#endif

  const bool goog_ns = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogNoiseSuppression);
  const bool goog_experimental_ns = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogExperimentalNoiseSuppression);
  const bool goog_beamforming = IsBeamformingEnabled(audio_constraints);
  const bool goog_high_pass_filter = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogHighpassFilter);

  // Return immediately if no goog constraint is enabled.
  if (!echo_cancellation && !goog_experimental_aec && !goog_ns &&
      !goog_high_pass_filter && !goog_typing_detection &&
      !goog_agc && !goog_experimental_ns && !goog_beamforming) {
    RecordProcessingState(AUDIO_PROCESSING_DISABLED);
    return;
  }

  // Experimental options provided at creation.
  webrtc::Config config;
  if (goog_experimental_aec)
    config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true));
  if (goog_experimental_ns)
    config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true));
  if (IsDelayAgnosticAecEnabled())
    config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true));
  if (goog_beamforming) {
    const auto& geometry =
        GetArrayGeometryPreferringConstraints(audio_constraints, input_params);

    // Only enable beamforming if we have at least two mics.
    config.Set<webrtc::Beamforming>(
        new webrtc::Beamforming(geometry.size() > 1, geometry));
  }

  // Create and configure the webrtc::AudioProcessing.
  audio_processing_.reset(webrtc::AudioProcessing::Create(config));

  // Enable the audio processing components.
  if (echo_cancellation) {
    EnableEchoCancellation(audio_processing_.get());

    if (playout_data_source_)
      playout_data_source_->AddPlayoutSink(this);

    // Prepare for logging echo information. If there is data remaining in
    // |echo_information_|, we simply discard it.
    echo_information_.reset(new EchoInformation());
  }

  if (goog_ns) {
    // The beamforming postfilter is effective at suppressing stationary noise,
    // so reduce the single-channel NS aggressiveness when enabled.
    const NoiseSuppression::Level ns_level =
        config.Get<webrtc::Beamforming>().enabled ? NoiseSuppression::kLow
                                                  : NoiseSuppression::kHigh;
    EnableNoiseSuppression(audio_processing_.get(), ns_level);
  }

  if (goog_high_pass_filter)
    EnableHighPassFilter(audio_processing_.get());

  if (goog_typing_detection) {
    // TODO(xians): Remove this |typing_detector_| after the typing suppression
    // is enabled by default.
    typing_detector_.reset(new webrtc::TypingDetection());
    EnableTypingDetection(audio_processing_.get(), typing_detector_.get());
  }

  if (goog_agc)
    EnableAutomaticGainControl(audio_processing_.get());

  RecordProcessingState(AUDIO_PROCESSING_ENABLED);
}

void MediaStreamAudioProcessor::InitializeCaptureFifo(
    const media::AudioParameters& input_format) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK(input_format.IsValid());
  input_format_ = input_format;

  // TODO(ajm): For now, we assume fixed parameters for the output when audio
  // processing is enabled, to match the previous behavior. We should either
  // use the input parameters (in which case, audio processing will convert
  // at output) or ideally, have a backchannel from the sink to know what
  // format it would prefer.
#if defined(OS_ANDROID)
  int audio_processing_sample_rate = AudioProcessing::kSampleRate16kHz;
#else
  int audio_processing_sample_rate = AudioProcessing::kSampleRate48kHz;
#endif
  const int output_sample_rate = audio_processing_ ?
                                 audio_processing_sample_rate :
                                 input_format.sample_rate();
  media::ChannelLayout output_channel_layout = audio_processing_ ?
      media::GuessChannelLayout(kAudioProcessingNumberOfChannels) :
      input_format.channel_layout();

  // The number of output channels from the fifo is normally the same as the
  // input.
  int fifo_output_channels = input_format.channels();

  // Special case for when we have a keyboard mic channel on the input and no
  // audio processing is used. We will then have the fifo strip away that
  // channel. So we use stereo as the output layout, and also change the
  // output channels for the fifo.
  if (input_format.channel_layout() ==
          media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC &&
      !audio_processing_) {
    output_channel_layout = media::CHANNEL_LAYOUT_STEREO;
    fifo_output_channels = ChannelLayoutToChannelCount(output_channel_layout);
  }

  // webrtc::AudioProcessing requires a 10 ms chunk size. We use this native
  // size when processing is enabled. When disabled we use the same size as
  // the source if less than 10 ms.
  //
  // TODO(ajm): This conditional buffer size appears to be assuming knowledge
  // of the sink based on the source parameters. PeerConnection sinks seem to
  // want 10 ms chunks regardless, while WebAudio sinks want less, and we're
  // assuming we can identify WebAudio sinks by the input chunk size. Less
  // fragile would be to have the sink actually tell us how much it wants (as
  // in the above TODO).
  int processing_frames = input_format.sample_rate() / 100;
  int output_frames = output_sample_rate / 100;
  if (!audio_processing_ && input_format.frames_per_buffer() < output_frames) {
    processing_frames = input_format.frames_per_buffer();
    output_frames = processing_frames;
  }
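
  // For example, at a 48 kHz input rate, processing_frames is 48000 / 100 =
  // 480 frames (10 ms); a WebAudio-style source delivering 128-frame buffers
  // with processing disabled drops both sizes to 128 frames.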

  output_format_ = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      output_channel_layout,
      output_sample_rate,
      16,
      output_frames);

  capture_fifo_.reset(
      new MediaStreamAudioFifo(input_format.channels(),
                               fifo_output_channels,
                               input_format.frames_per_buffer(),
                               processing_frames,
                               input_format.sample_rate()));

  if (audio_processing_) {
    output_bus_.reset(new MediaStreamAudioBus(output_format_.channels(),
                                              output_frames));
  }
}

void MediaStreamAudioProcessor::InitializeRenderFifoIfNeeded(
    int sample_rate, int number_of_channels, int frames_per_buffer) {
  DCHECK(render_thread_checker_.CalledOnValidThread());
  if (render_fifo_.get() &&
      render_format_.sample_rate() == sample_rate &&
      render_format_.channels() == number_of_channels &&
      render_format_.frames_per_buffer() == frames_per_buffer) {
    // Do nothing if the |render_fifo_| has been set up properly.
    return;
  }

  render_format_ = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::GuessChannelLayout(number_of_channels),
      sample_rate,
      16,
      frames_per_buffer);

  const int analysis_frames = sample_rate / 100;  // 10 ms chunks.
  render_fifo_.reset(
      new MediaStreamAudioFifo(number_of_channels,
                               number_of_channels,
                               frames_per_buffer,
                               analysis_frames,
                               sample_rate));
}
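
// Note that the render FIFO above uses the same channel count for source and
// destination; unlike the capture path, only the buffer size is adapted here,
// from the playout buffer size down to 10 ms analysis chunks.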

int MediaStreamAudioProcessor::ProcessData(const float* const* process_ptrs,
                                           int process_frames,
                                           base::TimeDelta capture_delay,
                                           int volume,
                                           bool key_pressed,
                                           float* const* output_ptrs) {
  DCHECK(audio_processing_);
  DCHECK(capture_thread_checker_.CalledOnValidThread());

  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessData");

  base::subtle::Atomic32 render_delay_ms =
      base::subtle::Acquire_Load(&render_delay_ms_);
  int64 capture_delay_ms = capture_delay.InMilliseconds();
  DCHECK_LT(capture_delay_ms,
            std::numeric_limits<base::subtle::Atomic32>::max());
  int total_delay_ms = capture_delay_ms + render_delay_ms;
  if (total_delay_ms > 300) {
    LOG(WARNING) << "Large audio delay, capture delay: " << capture_delay_ms
                 << "ms; render delay: " << render_delay_ms << "ms";
  }
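
  // The delay handed to the echo canceller is the end-to-end estimate: e.g.,
  // a 20 ms capture delay plus a 40 ms render delay is reported as a 60 ms
  // stream delay below.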

  webrtc::AudioProcessing* ap = audio_processing_.get();
  ap->set_stream_delay_ms(total_delay_ms);

  DCHECK_LE(volume, WebRtcAudioDeviceImpl::kMaxVolumeLevel);
  webrtc::GainControl* agc = ap->gain_control();
  int err = agc->set_stream_analog_level(volume);
  DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err;

  ap->set_stream_key_pressed(key_pressed);

  err = ap->ProcessStream(process_ptrs,
                          process_frames,
                          input_format_.sample_rate(),
                          MapLayout(input_format_.channel_layout()),
                          output_format_.sample_rate(),
                          MapLayout(output_format_.channel_layout()),
                          output_ptrs);
  DCHECK_EQ(err, 0) << "ProcessStream() error: " << err;
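
  // Note that this single ProcessStream() call both runs the enabled
  // processing components and converts from the input format to the output
  // format, since the two sample rates and channel layouts are passed
  // separately above.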

  if (typing_detector_) {
    webrtc::VoiceDetection* vad = ap->voice_detection();
    DCHECK(vad->is_enabled());
    bool detected = typing_detector_->Process(key_pressed,
                                              vad->stream_has_voice());
    base::subtle::Release_Store(&typing_detected_, detected);
  }

  if (echo_information_) {
    echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation());
  }

  // Return 0 if the volume hasn't been changed, and otherwise the new volume.
  return (agc->stream_analog_level() == volume) ?
      0 : agc->stream_analog_level();
}

}  // namespace content