content/renderer/media/media_stream_audio_processor.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/media_stream_audio_processor.h"

#include "base/command_line.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/trace_event/trace_event.h"
#include "content/public/common/content_switches.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
#include "content/renderer/media/rtc_media_constraints.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_converter.h"
#include "media/base/audio_fifo.h"
#include "media/base/channel_layout.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
#include "third_party/webrtc/modules/audio_processing/typing_detection.h"

#if defined(OS_CHROMEOS)
#include "base/sys_info.h"
#endif

namespace content {

namespace {

using webrtc::AudioProcessing;

#if defined(OS_ANDROID)
const int kAudioProcessingSampleRate = 16000;
#else
const int kAudioProcessingSampleRate = 32000;
#endif
const int kAudioProcessingNumberOfChannels = 1;
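
// Worked example (illustrative, not from the original file): the WebRTC audio
// processing module consumes audio in 10 ms chunks, so at the 32 kHz desktop
// rate above a chunk is 32000 / 100 = 320 frames, and at the 16 kHz Android
// rate it is 160 frames. The FIFOs below exist to rebuffer arbitrary source
// buffer sizes into these 10 ms chunks.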

AudioProcessing::ChannelLayout MapLayout(media::ChannelLayout media_layout) {
  switch (media_layout) {
    case media::CHANNEL_LAYOUT_MONO:
      return AudioProcessing::kMono;
    case media::CHANNEL_LAYOUT_STEREO:
      return AudioProcessing::kStereo;
    case media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC:
      return AudioProcessing::kStereoAndKeyboard;
    default:
      NOTREACHED() << "Layout not supported: " << media_layout;
      return AudioProcessing::kMono;
  }
}

// This is only used for playout data, where at most two channels are
// supported.
AudioProcessing::ChannelLayout ChannelsToLayout(int num_channels) {
  switch (num_channels) {
    case 1:
      return AudioProcessing::kMono;
    case 2:
      return AudioProcessing::kStereo;
    default:
      NOTREACHED() << "Channels not supported: " << num_channels;
      return AudioProcessing::kMono;
  }
}

// Used by UMA histograms and entries shouldn't be re-ordered or removed.
enum AudioTrackProcessingStates {
  AUDIO_PROCESSING_ENABLED = 0,
  AUDIO_PROCESSING_DISABLED,
  AUDIO_PROCESSING_IN_WEBRTC,
  AUDIO_PROCESSING_MAX
};

void RecordProcessingState(AudioTrackProcessingStates state) {
  UMA_HISTOGRAM_ENUMERATION("Media.AudioTrackProcessingStates",
                            state, AUDIO_PROCESSING_MAX);
}

bool isDelayAgnosticAecEnabled() {
  // Note: It's important to query the field trial state first, to ensure that
  // UMA reports the correct group.
  const std::string group_name =
      base::FieldTrialList::FindFullName("UseDelayAgnosticAEC");
  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
  if (command_line->HasSwitch(switches::kEnableDelayAgnosticAec))
    return true;

  return (group_name == "Enabled" || group_name == "DefaultEnabled");
}

}  // namespace

// Wraps AudioBus to provide access to the array of channel pointers, since
// this is the type webrtc::AudioProcessing deals in. The array is refreshed
// on every channel_ptrs() call, and will be valid until the underlying
// AudioBus pointers are changed, e.g. through calls to SetChannelData() or
// SwapChannels().
//
// All methods are called on one of the capture or render audio threads
// exclusively.
class MediaStreamAudioBus {
 public:
  MediaStreamAudioBus(int channels, int frames)
      : bus_(media::AudioBus::Create(channels, frames)),
        channel_ptrs_(new float*[channels]) {
    // May be created in the main render thread and used in the audio threads.
    thread_checker_.DetachFromThread();
  }

  media::AudioBus* bus() {
    DCHECK(thread_checker_.CalledOnValidThread());
    return bus_.get();
  }

  float* const* channel_ptrs() {
    DCHECK(thread_checker_.CalledOnValidThread());
    for (int i = 0; i < bus_->channels(); ++i) {
      channel_ptrs_[i] = bus_->channel(i);
    }
    return channel_ptrs_.get();
  }

 private:
  base::ThreadChecker thread_checker_;
  scoped_ptr<media::AudioBus> bus_;
  scoped_ptr<float*[]> channel_ptrs_;
};
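
// A minimal usage sketch (illustrative only; the variables below are
// hypothetical, not part of this file):
//
//   MediaStreamAudioBus bus(2 /* channels */, 320 /* frames */);
//   float* const* channels = bus.channel_ptrs();
//
// |channels| remains valid until the underlying AudioBus pointers change,
// e.g. via SetChannelData() or SwapChannels().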

// Wraps AudioFifo to provide a cleaner interface to MediaStreamAudioProcessor.
// It avoids the FIFO when the source and destination frames match. All methods
// are called on one of the capture or render audio threads exclusively. If
// |source_channels| is larger than |destination_channels|, only the first
// |destination_channels| are kept from the source.
class MediaStreamAudioFifo {
 public:
  MediaStreamAudioFifo(int source_channels,
                       int destination_channels,
                       int source_frames,
                       int destination_frames,
                       int sample_rate)
      : source_channels_(source_channels),
        source_frames_(source_frames),
        sample_rate_(sample_rate),
        destination_(
            new MediaStreamAudioBus(destination_channels, destination_frames)),
        data_available_(false) {
    DCHECK_GE(source_channels, destination_channels);
    DCHECK_GT(sample_rate_, 0);

    if (source_channels > destination_channels) {
      audio_source_intermediate_ =
          media::AudioBus::CreateWrapper(destination_channels);
    }

    if (source_frames != destination_frames) {
      // Since we require every Push to be followed by as many Consumes as
      // possible, twice the larger of the two is a (probably) loose upper
      // bound on the FIFO size.
      const int fifo_frames = 2 * std::max(source_frames, destination_frames);
      fifo_.reset(new media::AudioFifo(destination_channels, fifo_frames));
    }
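
    // Worked example (illustrative): with a 512-frame source and a 441-frame
    // destination (44.1 kHz input in 10 ms chunks), fifo_frames = 2 * 512 =
    // 1024. Immediately after a Push the FIFO holds at most 512 + 440 = 952
    // frames, which stays under the bound as long as every Push is followed
    // by as many Consumes as possible.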

    // May be created in the main render thread and used in the audio threads.
    thread_checker_.DetachFromThread();
  }

  void Push(const media::AudioBus& source, base::TimeDelta audio_delay) {
    DCHECK(thread_checker_.CalledOnValidThread());
    DCHECK_EQ(source.channels(), source_channels_);
    DCHECK_EQ(source.frames(), source_frames_);

    const media::AudioBus* source_to_push = &source;

    if (audio_source_intermediate_) {
      for (int i = 0; i < destination_->bus()->channels(); ++i) {
        audio_source_intermediate_->SetChannelData(
            i, const_cast<float*>(source.channel(i)));
      }
      audio_source_intermediate_->set_frames(source.frames());
      source_to_push = audio_source_intermediate_.get();
    }

    if (fifo_) {
      next_audio_delay_ = audio_delay +
          fifo_->frames() * base::TimeDelta::FromSeconds(1) / sample_rate_;
      fifo_->Push(source_to_push);
    } else {
      source_to_push->CopyTo(destination_->bus());
      next_audio_delay_ = audio_delay;
      data_available_ = true;
    }
  }
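
  // Delay bookkeeping, worked through (illustrative): if the FIFO already
  // holds 160 frames at 16 kHz when Push() is called, the oldest queued
  // sample is 160 / 16000 s = 10 ms older than the first sample of the
  // incoming buffer, so the delay reported by the next Consume() is
  // audio_delay + 10 ms.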

  // Returns true if there are destination_frames() of data available to be
  // consumed, and otherwise false.
  bool Consume(MediaStreamAudioBus** destination,
               base::TimeDelta* audio_delay) {
    DCHECK(thread_checker_.CalledOnValidThread());

    if (fifo_) {
      if (fifo_->frames() < destination_->bus()->frames())
        return false;

      fifo_->Consume(destination_->bus(), 0, destination_->bus()->frames());
      *audio_delay = next_audio_delay_;
      next_audio_delay_ -=
          destination_->bus()->frames() * base::TimeDelta::FromSeconds(1) /
              sample_rate_;
    } else {
      if (!data_available_)
        return false;
      *audio_delay = next_audio_delay_;
      // The data was already copied to |destination_| in this case.
      data_available_ = false;
    }

    *destination = destination_.get();
    return true;
  }

 private:
  base::ThreadChecker thread_checker_;
  const int source_channels_;  // For a DCHECK.
  const int source_frames_;  // For a DCHECK.
  const int sample_rate_;
  scoped_ptr<media::AudioBus> audio_source_intermediate_;
  scoped_ptr<MediaStreamAudioBus> destination_;
  scoped_ptr<media::AudioFifo> fifo_;

  // When using |fifo_|, this is the audio delay of the first sample to be
  // consumed next from the FIFO. When not using |fifo_|, this is the audio
  // delay of the first sample in |destination_|.
  base::TimeDelta next_audio_delay_;

  // True when |destination_| contains the data to be returned by the next
  // call to Consume(). Only used when the FIFO is disabled.
  bool data_available_;
};
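
// Typical push/consume pattern for MediaStreamAudioFifo (a sketch; the
// variable names are hypothetical):
//
//   fifo.Push(source_bus, capture_delay);
//   MediaStreamAudioBus* chunk;
//   base::TimeDelta delay;
//   while (fifo.Consume(&chunk, &delay)) {
//     // chunk->bus() holds exactly destination_frames of audio; |delay|
//     // describes the age of its first sample.
//   }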

MediaStreamAudioProcessor::MediaStreamAudioProcessor(
    const blink::WebMediaConstraints& constraints,
    int effects,
    WebRtcPlayoutDataSource* playout_data_source)
    : render_delay_ms_(0),
      playout_data_source_(playout_data_source),
      audio_mirroring_(false),
      typing_detected_(false),
      stopped_(false) {
  capture_thread_checker_.DetachFromThread();
  render_thread_checker_.DetachFromThread();
  InitializeAudioProcessingModule(constraints, effects);

  aec_dump_message_filter_ = AecDumpMessageFilter::Get();
  // In unit tests that do not create a message filter,
  // |aec_dump_message_filter_| will be NULL. We can just ignore that. Other
  // unit tests and browser tests ensure that we do get the filter when we
  // should.
  if (aec_dump_message_filter_.get())
    aec_dump_message_filter_->AddDelegate(this);
}

MediaStreamAudioProcessor::~MediaStreamAudioProcessor() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  Stop();
}

void MediaStreamAudioProcessor::OnCaptureFormatChanged(
    const media::AudioParameters& input_format) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  // There is no need to hold a lock here since the caller guarantees that
  // there are no more PushCaptureData() and ProcessAndConsumeData() callbacks
  // on the capture thread.
  InitializeCaptureFifo(input_format);

  // Reset the |capture_thread_checker_| since the capture data will come from
  // a new capture thread.
  capture_thread_checker_.DetachFromThread();
}

void MediaStreamAudioProcessor::PushCaptureData(
    const media::AudioBus& audio_source,
    base::TimeDelta capture_delay) {
  DCHECK(capture_thread_checker_.CalledOnValidThread());

  capture_fifo_->Push(audio_source, capture_delay);
}

bool MediaStreamAudioProcessor::ProcessAndConsumeData(
    int volume,
    bool key_pressed,
    media::AudioBus** processed_data,
    base::TimeDelta* capture_delay,
    int* new_volume) {
  DCHECK(capture_thread_checker_.CalledOnValidThread());
  DCHECK(processed_data);
  DCHECK(capture_delay);
  DCHECK(new_volume);

  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessAndConsumeData");

  MediaStreamAudioBus* process_bus;
  if (!capture_fifo_->Consume(&process_bus, capture_delay))
    return false;

  // Use the process bus directly if audio processing is disabled.
  MediaStreamAudioBus* output_bus = process_bus;
  *new_volume = 0;
  if (audio_processing_) {
    output_bus = output_bus_.get();
    *new_volume = ProcessData(process_bus->channel_ptrs(),
                              process_bus->bus()->frames(), *capture_delay,
                              volume, key_pressed, output_bus->channel_ptrs());
  }

  // Swap channels before interleaving the data.
  if (audio_mirroring_ &&
      output_format_.channel_layout() == media::CHANNEL_LAYOUT_STEREO) {
    // Swap the first and second channels.
    output_bus->bus()->SwapChannels(0, 1);
  }

  *processed_data = output_bus->bus();

  return true;
}
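
// Sketch of how a capture-thread caller might drive the two methods above
// (illustrative; |processor| and the surrounding loop are hypothetical):
//
//   processor->PushCaptureData(audio_source, capture_delay);
//   media::AudioBus* processed = NULL;
//   base::TimeDelta delay;
//   int new_volume = 0;
//   while (processor->ProcessAndConsumeData(volume, key_pressed, &processed,
//                                           &delay, &new_volume)) {
//     // Deliver |processed| to sinks; apply |new_volume| if non-zero.
//   }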

void MediaStreamAudioProcessor::Stop() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  if (stopped_)
    return;

  stopped_ = true;

  if (aec_dump_message_filter_.get()) {
    aec_dump_message_filter_->RemoveDelegate(this);
    aec_dump_message_filter_ = NULL;
  }

  if (!audio_processing_.get())
    return;

  StopEchoCancellationDump(audio_processing_.get());

  if (playout_data_source_) {
    playout_data_source_->RemovePlayoutSink(this);
    playout_data_source_ = NULL;
  }
}

const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const {
  return input_format_;
}

const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const {
  return output_format_;
}

void MediaStreamAudioProcessor::OnAecDumpFile(
    const IPC::PlatformFileForTransit& file_handle) {
  DCHECK(main_thread_checker_.CalledOnValidThread());

  base::File file = IPC::PlatformFileForTransitToFile(file_handle);
  DCHECK(file.IsValid());

  if (audio_processing_)
    StartEchoCancellationDump(audio_processing_.get(), file.Pass());
  else
    file.Close();
}

void MediaStreamAudioProcessor::OnDisableAecDump() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  if (audio_processing_)
    StopEchoCancellationDump(audio_processing_.get());
}

void MediaStreamAudioProcessor::OnIpcClosing() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  aec_dump_message_filter_ = NULL;
}

void MediaStreamAudioProcessor::OnPlayoutData(media::AudioBus* audio_bus,
                                              int sample_rate,
                                              int audio_delay_milliseconds) {
  DCHECK(render_thread_checker_.CalledOnValidThread());
  DCHECK(audio_processing_->echo_control_mobile()->is_enabled() ^
         audio_processing_->echo_cancellation()->is_enabled());

  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::OnPlayoutData");
  DCHECK_LT(audio_delay_milliseconds,
            std::numeric_limits<base::subtle::Atomic32>::max());
  base::subtle::Release_Store(&render_delay_ms_, audio_delay_milliseconds);

  InitializeRenderFifoIfNeeded(sample_rate, audio_bus->channels(),
                               audio_bus->frames());

  render_fifo_->Push(
      *audio_bus, base::TimeDelta::FromMilliseconds(audio_delay_milliseconds));
  MediaStreamAudioBus* analysis_bus;
  base::TimeDelta audio_delay;
  while (render_fifo_->Consume(&analysis_bus, &audio_delay)) {
    // TODO(ajm): Should AnalyzeReverseStream() account for the |audio_delay|?
    audio_processing_->AnalyzeReverseStream(
        analysis_bus->channel_ptrs(),
        analysis_bus->bus()->frames(),
        sample_rate,
        ChannelsToLayout(audio_bus->channels()));
  }
}
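
// Note (illustrative): the render FIFO rebuffers playout audio into 10 ms
// chunks (e.g. 441 frames at 44.1 kHz) because AnalyzeReverseStream(), like
// ProcessStream(), operates on 10 ms of audio per call.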

void MediaStreamAudioProcessor::OnPlayoutDataSourceChanged() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  // There is no need to hold a lock here since the caller guarantees that
  // there are no more OnPlayoutData() callbacks on the render thread.
  render_thread_checker_.DetachFromThread();
  render_fifo_.reset();
}

void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) {
  stats->typing_noise_detected =
      (base::subtle::Acquire_Load(&typing_detected_) != false);
  GetAecStats(audio_processing_.get()->echo_cancellation(), stats);
}

void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
    const blink::WebMediaConstraints& constraints, int effects) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK(!audio_processing_);

  MediaAudioConstraints audio_constraints(constraints, effects);

  // Audio mirroring can be enabled even though audio processing is otherwise
  // disabled.
  audio_mirroring_ = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogAudioMirroring);

#if defined(OS_IOS)
  // On iOS, VPIO provides built-in AGC and AEC.
  const bool echo_cancellation = false;
  const bool goog_agc = false;
#else
  const bool echo_cancellation =
      audio_constraints.GetEchoCancellationProperty();
  const bool goog_agc = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogAutoGainControl);
#endif

#if defined(OS_IOS) || defined(OS_ANDROID)
  const bool goog_experimental_aec = false;
  const bool goog_typing_detection = false;
#else
  const bool goog_experimental_aec = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogExperimentalEchoCancellation);
  const bool goog_typing_detection = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogTypingNoiseDetection);
#endif

  const bool goog_ns = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogNoiseSuppression);
  const bool goog_experimental_ns = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogExperimentalNoiseSuppression);
  const bool goog_beamforming = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogBeamforming);
  const bool goog_high_pass_filter = audio_constraints.GetProperty(
      MediaAudioConstraints::kGoogHighpassFilter);

  // Return immediately if no goog constraint is enabled.
  if (!echo_cancellation && !goog_experimental_aec && !goog_ns &&
      !goog_high_pass_filter && !goog_typing_detection &&
      !goog_agc && !goog_experimental_ns && !goog_beamforming) {
    RecordProcessingState(AUDIO_PROCESSING_DISABLED);
    return;
  }

  // Experimental options provided at creation.
  webrtc::Config config;
  if (goog_experimental_aec)
    config.Set<webrtc::DelayCorrection>(new webrtc::DelayCorrection(true));
  if (goog_experimental_ns)
    config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true));
  if (isDelayAgnosticAecEnabled())
    config.Set<webrtc::ReportedDelay>(new webrtc::ReportedDelay(false));
  if (goog_beamforming) {
    ConfigureBeamforming(&config);
  }

  // Create and configure the webrtc::AudioProcessing.
  audio_processing_.reset(webrtc::AudioProcessing::Create(config));

  // Enable the audio processing components.
  if (echo_cancellation) {
    EnableEchoCancellation(audio_processing_.get());

    if (playout_data_source_)
      playout_data_source_->AddPlayoutSink(this);

    // Prepare for logging echo information. If there is data remaining in
    // |echo_information_|, we simply discard it.
    echo_information_.reset(new EchoInformation());
  }

  if (goog_ns)
    EnableNoiseSuppression(audio_processing_.get());

  if (goog_high_pass_filter)
    EnableHighPassFilter(audio_processing_.get());

  if (goog_typing_detection) {
    // TODO(xians): Remove this |typing_detector_| after the typing suppression
    // is enabled by default.
    typing_detector_.reset(new webrtc::TypingDetection());
    EnableTypingDetection(audio_processing_.get(), typing_detector_.get());
  }

  if (goog_agc)
    EnableAutomaticGainControl(audio_processing_.get());

  RecordProcessingState(AUDIO_PROCESSING_ENABLED);
}

void MediaStreamAudioProcessor::ConfigureBeamforming(webrtc::Config* config) {
  bool enabled = false;
  std::vector<webrtc::Point> geometry(1, webrtc::Point(0.f, 0.f, 0.f));
#if defined(OS_CHROMEOS)
  const std::string board = base::SysInfo::GetLsbReleaseBoard();
  if (board == "peach_pi") {
    enabled = true;
    geometry.push_back(webrtc::Point(0.050f, 0.f, 0.f));
  } else if (board == "swanky") {
    enabled = true;
    geometry.push_back(webrtc::Point(0.052f, 0.f, 0.f));
  }
#endif
  config->Set<webrtc::Beamforming>(new webrtc::Beamforming(enabled, geometry));
}
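
// Geometry note (added for clarity): the webrtc::Point coordinates above are
// in meters, so the geometries describe two-microphone arrays with the second
// mic 5.0 cm ("peach_pi") or 5.2 cm ("swanky") from the first along the
// x axis.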

void MediaStreamAudioProcessor::InitializeCaptureFifo(
    const media::AudioParameters& input_format) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK(input_format.IsValid());
  input_format_ = input_format;

  // TODO(ajm): For now, we assume fixed parameters for the output when audio
  // processing is enabled, to match the previous behavior. We should either
  // use the input parameters (in which case, audio processing will convert
  // at output) or ideally, have a backchannel from the sink to know what
  // format it would prefer.
  const int output_sample_rate = audio_processing_ ?
      kAudioProcessingSampleRate : input_format.sample_rate();
  media::ChannelLayout output_channel_layout = audio_processing_ ?
      media::GuessChannelLayout(kAudioProcessingNumberOfChannels) :
      input_format.channel_layout();

  // The output channels from the fifo are normally the same as the input.
  int fifo_output_channels = input_format.channels();

  // Special case: if we have a keyboard mic channel on the input and no
  // audio processing is used, we have the fifo strip away that channel. So
  // we use stereo as the output layout, and also change the output channels
  // for the fifo.
  if (input_format.channel_layout() ==
          media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC &&
      !audio_processing_) {
    output_channel_layout = media::CHANNEL_LAYOUT_STEREO;
    fifo_output_channels = ChannelLayoutToChannelCount(output_channel_layout);
  }

  // webrtc::AudioProcessing requires a 10 ms chunk size. We use this native
  // size when processing is enabled. When disabled we use the same size as
  // the source if less than 10 ms.
  //
  // TODO(ajm): This conditional buffer size appears to be assuming knowledge
  // of the sink based on the source parameters. PeerConnection sinks seem to
  // want 10 ms chunks regardless, while WebAudio sinks want less, and we're
  // assuming we can identify WebAudio sinks by the input chunk size. Less
  // fragile would be to have the sink actually tell us how much it wants (as
  // in the above TODO).
  int processing_frames = input_format.sample_rate() / 100;
  int output_frames = output_sample_rate / 100;
  if (!audio_processing_ && input_format.frames_per_buffer() < output_frames) {
    processing_frames = input_format.frames_per_buffer();
    output_frames = processing_frames;
  }

  output_format_ = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      output_channel_layout,
      output_sample_rate,
      16,  // bits per sample
      output_frames);

  capture_fifo_.reset(
      new MediaStreamAudioFifo(input_format.channels(),
                               fifo_output_channels,
                               input_format.frames_per_buffer(),
                               processing_frames,
                               input_format.sample_rate()));

  if (audio_processing_) {
    output_bus_.reset(new MediaStreamAudioBus(output_format_.channels(),
                                              output_frames));
  }
}
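
// Worked format example (illustrative): with processing enabled and a 48 kHz
// stereo source delivering 480-frame buffers, processing_frames = 480 (10 ms),
// so the capture FIFO does no rebuffering, and output_format_ becomes 32 kHz
// mono with 320-frame buffers; ProcessStream() performs the sample-rate and
// channel conversion.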

void MediaStreamAudioProcessor::InitializeRenderFifoIfNeeded(
    int sample_rate, int number_of_channels, int frames_per_buffer) {
  DCHECK(render_thread_checker_.CalledOnValidThread());
  if (render_fifo_.get() &&
      render_format_.sample_rate() == sample_rate &&
      render_format_.channels() == number_of_channels &&
      render_format_.frames_per_buffer() == frames_per_buffer) {
    // Do nothing if the |render_fifo_| has been set up properly.
    return;
  }

  render_format_ = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::GuessChannelLayout(number_of_channels),
      sample_rate,
      16,  // bits per sample
      frames_per_buffer);

  const int analysis_frames = sample_rate / 100;  // 10 ms chunks.
  render_fifo_.reset(
      new MediaStreamAudioFifo(number_of_channels,
                               number_of_channels,
                               frames_per_buffer,
                               analysis_frames,
                               sample_rate));
}
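
// Example (illustrative): for 44.1 kHz playout, analysis_frames = 441; a
// 512-frame render callback then leaves 512 - 441 = 71 frames queued in the
// FIFO for the next OnPlayoutData() call.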

int MediaStreamAudioProcessor::ProcessData(const float* const* process_ptrs,
                                           int process_frames,
                                           base::TimeDelta capture_delay,
                                           int volume,
                                           bool key_pressed,
                                           float* const* output_ptrs) {
  DCHECK(audio_processing_);
  DCHECK(capture_thread_checker_.CalledOnValidThread());

  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessData");

  base::subtle::Atomic32 render_delay_ms =
      base::subtle::Acquire_Load(&render_delay_ms_);
  int64 capture_delay_ms = capture_delay.InMilliseconds();
  DCHECK_LT(capture_delay_ms,
            std::numeric_limits<base::subtle::Atomic32>::max());
  int total_delay_ms = capture_delay_ms + render_delay_ms;
  if (total_delay_ms > 300) {
    LOG(WARNING) << "Large audio delay, capture delay: " << capture_delay_ms
                 << "ms; render delay: " << render_delay_ms << "ms";
  }

  webrtc::AudioProcessing* ap = audio_processing_.get();
  ap->set_stream_delay_ms(total_delay_ms);

  DCHECK_LE(volume, WebRtcAudioDeviceImpl::kMaxVolumeLevel);
  webrtc::GainControl* agc = ap->gain_control();
  int err = agc->set_stream_analog_level(volume);
  DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err;

  ap->set_stream_key_pressed(key_pressed);

  err = ap->ProcessStream(process_ptrs,
                          process_frames,
                          input_format_.sample_rate(),
                          MapLayout(input_format_.channel_layout()),
                          output_format_.sample_rate(),
                          MapLayout(output_format_.channel_layout()),
                          output_ptrs);
  DCHECK_EQ(err, 0) << "ProcessStream() error: " << err;

  if (typing_detector_) {
    webrtc::VoiceDetection* vad = ap->voice_detection();
    DCHECK(vad->is_enabled());
    bool detected = typing_detector_->Process(key_pressed,
                                              vad->stream_has_voice());
    base::subtle::Release_Store(&typing_detected_, detected);
  }

  if (echo_information_) {
    echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation());
  }

  // Return 0 if the volume hasn't been changed, and otherwise the new volume.
  return (agc->stream_analog_level() == volume) ?
      0 : agc->stream_analog_level();
}

}  // namespace content