// content/renderer/media/media_stream_audio_processor_unittest.cc

// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
#include "base/path_service.h"
#include "base/time/time.h"
#include "content/public/common/media_stream_request.h"
#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
#include "content/renderer/media/mock_media_constraint_factory.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"

using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::Return;

namespace content {

namespace {

#if defined(ANDROID)
const int kAudioProcessingSampleRate = 16000;
#else
const int kAudioProcessingSampleRate = 32000;
#endif
const int kAudioProcessingNumberOfChannel = 1;

// The number of packets used for testing.
const int kNumberOfPacketsForTest = 100;

const int kMaxNumberOfPlayoutDataChannels = 2;

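// Reads |length| bytes of raw 16-bit stereo 48 kHz PCM speech from the test
// data directory into |data|. The caller must allocate at least |length|
// bytes for |data|.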
void ReadDataFromSpeechFile(char* data, int length) {
  base::FilePath file;
  CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file));
  file = file.Append(FILE_PATH_LITERAL("media"))
             .Append(FILE_PATH_LITERAL("test"))
             .Append(FILE_PATH_LITERAL("data"))
             .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
  DCHECK(base::PathExists(file));
  int64 data_file_size64 = 0;
  DCHECK(base::GetFileSize(file, &data_file_size64));
  EXPECT_EQ(length, base::ReadFile(file, data, length));
  DCHECK(data_file_size64 > length);
}

}  // namespace

class MediaStreamAudioProcessorTest : public ::testing::Test {
 public:
  MediaStreamAudioProcessorTest()
      : params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                media::CHANNEL_LAYOUT_STEREO, 48000, 16, 512) {
  }

 protected:
  // Helper method to save duplicated code.
  void ProcessDataAndVerifyFormat(MediaStreamAudioProcessor* audio_processor,
                                  int expected_output_sample_rate,
                                  int expected_output_channels,
                                  int expected_output_buffer_size) {
    // Read the audio data from a file.
    const media::AudioParameters& params = audio_processor->InputFormat();
    const int packet_size =
        params.frames_per_buffer() * 2 * params.channels();
    const size_t length = packet_size * kNumberOfPacketsForTest;
    scoped_ptr<char[]> capture_data(new char[length]);
    ReadDataFromSpeechFile(capture_data.get(), length);
    const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
    scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
        params.channels(), params.frames_per_buffer());

    // |data_bus_playout| is used if the number of capture channels is larger
    // than the maximum number of playout channels allowed.
    // |data_bus_playout_to_use| points to the AudioBus to use, either
    // |data_bus| or |data_bus_playout|.
    scoped_ptr<media::AudioBus> data_bus_playout;
    media::AudioBus* data_bus_playout_to_use = data_bus.get();
    if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
      data_bus_playout =
          media::AudioBus::CreateWrapper(kMaxNumberOfPlayoutDataChannels);
      data_bus_playout->set_frames(params.frames_per_buffer());
      data_bus_playout_to_use = data_bus_playout.get();
    }

    const base::TimeDelta input_capture_delay =
        base::TimeDelta::FromMilliseconds(20);
    const base::TimeDelta output_buffer_duration =
        expected_output_buffer_size * base::TimeDelta::FromSeconds(1) /
            expected_output_sample_rate;
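    // Push the capture data packet by packet. The capture delay reported
    // back by the processor may differ from the pushed delay by at most one
    // output buffer's worth of time (10 ms when the expected buffer size is
    // sample_rate / 100, as in the tests below).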
    for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
      data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
      audio_processor->PushCaptureData(*data_bus, input_capture_delay);

      // |audio_processor| does nothing when the audio processing is off in
      // the processor.
      webrtc::AudioProcessing* ap = audio_processor->audio_processing_.get();
#if defined(OS_ANDROID) || defined(OS_IOS)
      const bool is_aec_enabled = ap && ap->echo_control_mobile()->is_enabled();
      // AEC should be turned off for mobiles.
      DCHECK(!ap || !ap->echo_cancellation()->is_enabled());
#else
      const bool is_aec_enabled = ap && ap->echo_cancellation()->is_enabled();
#endif
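      // When AEC is enabled it also needs render-side data, so feed the
      // capture signal back as playout data, limited to at most
      // kMaxNumberOfPlayoutDataChannels channels; in this test it simply
      // stands in for real playout audio.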
      if (is_aec_enabled) {
        if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
          for (int i = 0; i < kMaxNumberOfPlayoutDataChannels; ++i) {
            data_bus_playout->SetChannelData(
                i, const_cast<float*>(data_bus->channel(i)));
          }
        }
        audio_processor->OnPlayoutData(data_bus_playout_to_use,
                                       params.sample_rate(), 10);
      }

      media::AudioBus* processed_data = nullptr;
      base::TimeDelta capture_delay;
      int new_volume = 0;
      while (audio_processor->ProcessAndConsumeData(
                 255, false, &processed_data, &capture_delay, &new_volume)) {
        EXPECT_TRUE(processed_data);
        EXPECT_NEAR(input_capture_delay.InMillisecondsF(),
                    capture_delay.InMillisecondsF(),
                    output_buffer_duration.InMillisecondsF());
        EXPECT_EQ(audio_processor->OutputFormat().sample_rate(),
                  expected_output_sample_rate);
        EXPECT_EQ(audio_processor->OutputFormat().channels(),
                  expected_output_channels);
        EXPECT_EQ(audio_processor->OutputFormat().frames_per_buffer(),
                  expected_output_buffer_size);
      }

      data_ptr += params.frames_per_buffer() * params.channels();
    }
  }

  void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) {
    webrtc::AudioProcessing* audio_processing =
        audio_processor->audio_processing_.get();
#if defined(OS_ANDROID)
    EXPECT_TRUE(audio_processing->echo_control_mobile()->is_enabled());
    EXPECT_TRUE(audio_processing->echo_control_mobile()->routing_mode() ==
                webrtc::EchoControlMobile::kSpeakerphone);
    EXPECT_FALSE(audio_processing->echo_cancellation()->is_enabled());
#elif !defined(OS_IOS)
    EXPECT_TRUE(audio_processing->echo_cancellation()->is_enabled());
    EXPECT_TRUE(audio_processing->echo_cancellation()->suppression_level() ==
                webrtc::EchoCancellation::kHighSuppression);
    EXPECT_TRUE(audio_processing->echo_cancellation()->are_metrics_enabled());
    EXPECT_TRUE(
        audio_processing->echo_cancellation()->is_delay_logging_enabled());
#endif

    EXPECT_TRUE(audio_processing->noise_suppression()->is_enabled());
    EXPECT_TRUE(audio_processing->noise_suppression()->level() ==
                webrtc::NoiseSuppression::kHigh);
    EXPECT_TRUE(audio_processing->high_pass_filter()->is_enabled());
    EXPECT_TRUE(audio_processing->gain_control()->is_enabled());
#if defined(OS_ANDROID) || defined(OS_IOS)
    EXPECT_TRUE(audio_processing->gain_control()->mode() ==
                webrtc::GainControl::kFixedDigital);
    EXPECT_FALSE(audio_processing->voice_detection()->is_enabled());
#else
    EXPECT_TRUE(audio_processing->gain_control()->mode() ==
                webrtc::GainControl::kAdaptiveAnalog);
    EXPECT_TRUE(audio_processing->voice_detection()->is_enabled());
    EXPECT_TRUE(audio_processing->voice_detection()->likelihood() ==
                webrtc::VoiceDetection::kVeryLowLikelihood);
#endif
  }

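  // Default capture format used by the tests: 48 kHz stereo, 16-bit samples,
  // 512 frames per buffer (roughly 10.7 ms of audio).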
  media::AudioParameters params_;
};

TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_TRUE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);
  VerifyDefaultComponents(audio_processor.get());

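  // With processing enabled, the output should be mono at the WebRTC
  // processing sample rate, delivered in 10 ms buffers (sample rate / 100
  // frames).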
  ProcessDataAndVerifyFormat(audio_processor.get(),
                             kAudioProcessingSampleRate,
                             kAudioProcessingNumberOfChannel,
                             kAudioProcessingSampleRate / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) {
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source.
  MockMediaConstraintFactory tab_constraint_factory;
  const std::string tab_string = kMediaStreamSourceTab;
  tab_constraint_factory.AddMandatory(kMediaStreamSource,
                                      tab_string);
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          tab_constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);

  ProcessDataAndVerifyFormat(audio_processor.get(),
                             params_.sample_rate(),
                             params_.channels(),
                             params_.sample_rate() / 100);

  // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem
  // source.
  MockMediaConstraintFactory system_constraint_factory;
  const std::string system_string = kMediaStreamSourceSystem;
  system_constraint_factory.AddMandatory(kMediaStreamSource,
                                         system_string);
  audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
      system_constraint_factory.CreateWebMediaConstraints(), 0,
      webrtc_audio_device.get());
  EXPECT_FALSE(audio_processor->has_audio_processing());

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) {
  // Turn off the default constraints and pass it to MediaStreamAudioProcessor.
  MockMediaConstraintFactory constraint_factory;
  constraint_factory.DisableDefaultAudioConstraints();
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);

  ProcessDataAndVerifyFormat(audio_processor.get(),
                             params_.sample_rate(),
                             params_.channels(),
                             params_.sample_rate() / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, VerifyConstraints) {
  static const char* kDefaultAudioConstraints[] = {
    MediaAudioConstraints::kEchoCancellation,
    MediaAudioConstraints::kGoogAudioMirroring,
    MediaAudioConstraints::kGoogAutoGainControl,
    MediaAudioConstraints::kGoogEchoCancellation,
    MediaAudioConstraints::kGoogExperimentalEchoCancellation,
    MediaAudioConstraints::kGoogExperimentalAutoGainControl,
    MediaAudioConstraints::kGoogExperimentalNoiseSuppression,
    MediaAudioConstraints::kGoogHighpassFilter,
    MediaAudioConstraints::kGoogNoiseSuppression,
    MediaAudioConstraints::kGoogTypingNoiseDetection,
    kMediaStreamAudioHotword
  };

  // Verify mandatory constraints.
  for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
    MockMediaConstraintFactory constraint_factory;
    constraint_factory.AddMandatory(kDefaultAudioConstraints[i], false);
    blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints(constraints, 0);
    EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
  }

  // Verify optional constraints.
  for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
    MockMediaConstraintFactory constraint_factory;
    constraint_factory.AddOptional(kDefaultAudioConstraints[i], false);
    blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints(constraints, 0);
    EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
  }

  {
    // Verify echo cancellation is off when platform aec effect is on.
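    // The second MediaAudioConstraints argument carries the platform audio
    // effect flags; passing ECHO_CANCELLER below indicates that a hardware
    // AEC is available, so the software echo cancellation should default to
    // off.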
    MockMediaConstraintFactory constraint_factory;
    MediaAudioConstraints audio_constraints(
        constraint_factory.CreateWebMediaConstraints(),
        media::AudioParameters::ECHO_CANCELLER);
    EXPECT_FALSE(audio_constraints.GetEchoCancellationProperty());
  }

  {
    // Verify |kEchoCancellation| overrides |kGoogEchoCancellation|.
    MockMediaConstraintFactory constraint_factory_1;
    constraint_factory_1.AddOptional(MediaAudioConstraints::kEchoCancellation,
                                     true);
    constraint_factory_1.AddOptional(
        MediaAudioConstraints::kGoogEchoCancellation, false);
    blink::WebMediaConstraints constraints_1 =
        constraint_factory_1.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints_1(constraints_1, 0);
    EXPECT_TRUE(audio_constraints_1.GetEchoCancellationProperty());

    MockMediaConstraintFactory constraint_factory_2;
    constraint_factory_2.AddOptional(MediaAudioConstraints::kEchoCancellation,
                                     false);
    constraint_factory_2.AddOptional(
        MediaAudioConstraints::kGoogEchoCancellation, true);
    blink::WebMediaConstraints constraints_2 =
        constraint_factory_2.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints_2(constraints_2, 0);
    EXPECT_FALSE(audio_constraints_2.GetEchoCancellationProperty());
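    // In both directions the generic |kEchoCancellation| value wins over the
    // goog-specific |kGoogEchoCancellation| flag.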
  }

  {
    // When |kEchoCancellation| is explicitly set to false, the default values
    // for all the constraints except |kMediaStreamAudioDucking| are false.
    MockMediaConstraintFactory constraint_factory;
    constraint_factory.AddOptional(MediaAudioConstraints::kEchoCancellation,
                                   false);
    blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints(constraints, 0);
    for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
      EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
    }
#if defined(OS_WIN)
    EXPECT_TRUE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
#else
    EXPECT_FALSE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
#endif
  }

  {
    // |kMediaStreamAudioHotword| is always off by default.
    MockMediaConstraintFactory constraint_factory;
    MediaAudioConstraints audio_constraints(
        constraint_factory.CreateWebMediaConstraints(), 0);
    EXPECT_FALSE(audio_constraints.GetProperty(kMediaStreamAudioHotword));
  }
}

TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) {
  MockMediaConstraintFactory constraint_factory;
  const std::string dummy_constraint = "dummy";
  constraint_factory.AddMandatory(dummy_constraint, true);
  MediaAudioConstraints audio_constraints(
      constraint_factory.CreateWebMediaConstraints(), 0);
  EXPECT_FALSE(audio_constraints.IsValid());
}

TEST_F(MediaStreamAudioProcessorTest, TestAllSampleRates) {
  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_TRUE(audio_processor->has_audio_processing());

  static const int kSupportedSampleRates[] =
      { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 };
  for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) {
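    // Use 10 ms buffers (sample rate / 100) for the lower rates, capped at
    // 128 frames for the higher ones; the cap presumably exercises the
    // processor's internal buffering when the input buffer holds less than a
    // 10 ms processing chunk.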
    int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ?
        kSupportedSampleRates[i] / 100 : 128;
    media::AudioParameters params(
        media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
        media::CHANNEL_LAYOUT_STEREO, kSupportedSampleRates[i], 16,
        buffer_size);
    audio_processor->OnCaptureFormatChanged(params);
    VerifyDefaultComponents(audio_processor.get());

    ProcessDataAndVerifyFormat(audio_processor.get(),
                               kAudioProcessingSampleRate,
                               kAudioProcessingNumberOfChannel,
                               kAudioProcessingSampleRate / 100);
  }

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device|
  // outlives |audio_processor|.
  audio_processor = NULL;
}

// Test that if an AEC dump message filter has been created, it is picked up
// correctly by MediaStreamAudioProcessor. Any IPC messages will be deleted
// since no sender is created for the filter.
TEST_F(MediaStreamAudioProcessorTest, GetAecDumpMessageFilter) {
  base::MessageLoopForUI message_loop;
  scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_(
      new AecDumpMessageFilter(message_loop.message_loop_proxy(),
                               message_loop.message_loop_proxy()));

  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));

  EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get());

  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) {
  // Set up constraints that turn off the audio processing and turn on stereo
  // channel mirroring.
  MockMediaConstraintFactory constraint_factory;
  constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation,
                                  false);
  constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring,
                                  true);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  const media::AudioParameters source_params(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480);
  audio_processor->OnCaptureFormatChanged(source_params);
  EXPECT_EQ(audio_processor->OutputFormat().channels(), 2);

  // Construct left and right channels, and give the first sample of the left
  // channel a different value from the right channel.
  const int size = media::AudioBus::CalculateMemorySize(source_params);
  scoped_ptr<float, base::AlignedFreeDeleter> left_channel(
      static_cast<float*>(base::AlignedAlloc(size, 32)));
  scoped_ptr<float, base::AlignedFreeDeleter> right_channel(
      static_cast<float*>(base::AlignedAlloc(size, 32)));
  scoped_ptr<media::AudioBus> wrapper = media::AudioBus::CreateWrapper(
      source_params.channels());
  wrapper->set_frames(source_params.frames_per_buffer());
  wrapper->SetChannelData(0, left_channel.get());
  wrapper->SetChannelData(1, right_channel.get());
  wrapper->Zero();
  float* left_channel_ptr = left_channel.get();
  left_channel_ptr[0] = 1.0f;

  // Run the test consecutively to make sure the stereo channels are not
  // flipped back and forth.
  static const int kNumberOfPacketsForTest = 100;
  const base::TimeDelta pushed_capture_delay =
      base::TimeDelta::FromMilliseconds(42);
  for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
    audio_processor->PushCaptureData(*wrapper, pushed_capture_delay);

    media::AudioBus* processed_data = nullptr;
    base::TimeDelta capture_delay;
    int new_volume = 0;
    EXPECT_TRUE(audio_processor->ProcessAndConsumeData(
        0, false, &processed_data, &capture_delay, &new_volume));
    EXPECT_TRUE(processed_data);
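    // Mirroring should have swapped the channels: the marker written to the
    // first sample of the left channel must now appear on the right channel
    // instead.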
    EXPECT_EQ(processed_data->channel(0)[0], 0);
    EXPECT_NE(processed_data->channel(1)[0], 0);
    EXPECT_EQ(pushed_capture_delay, capture_delay);
  }

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, TestWithKeyboardMicChannel) {
  MockMediaConstraintFactory constraint_factory;
  constraint_factory.AddMandatory(
      MediaAudioConstraints::kGoogExperimentalNoiseSuppression, true);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_TRUE(audio_processor->has_audio_processing());

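  // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC carries an extra keyboard-mic
  // channel that is meant to feed the experimental noise suppression
  // (key-press removal); it should not leak into the processed output, which
  // is verified below to be the standard mono processing format.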
  media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                                media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC,
                                48000, 16, 512);
  audio_processor->OnCaptureFormatChanged(params);

  ProcessDataAndVerifyFormat(audio_processor.get(),
                             kAudioProcessingSampleRate,
                             kAudioProcessingNumberOfChannel,
                             kAudioProcessingSampleRate / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

}  // namespace content