content/renderer/media/media_stream_audio_processor_unittest.cc

// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
#include "base/path_service.h"
#include "base/time/time.h"
#include "content/public/common/content_switches.h"
#include "content/public/common/media_stream_request.h"
#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
#include "content/renderer/media/mock_media_constraint_factory.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"

using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::Return;

namespace content {

namespace {

#if defined(OS_ANDROID)
const int kAudioProcessingSampleRate = 16000;
#else
const int kAudioProcessingSampleRate = 32000;
#endif
const int kAudioProcessingNumberOfChannel = 1;

// The number of packets used for testing.
const int kNumberOfPacketsForTest = 100;

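// Upper bound on the number of channels fed back to the processor as playout
// (reverse stream) data; at most this many capture channels are mirrored into
// the playout path below.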
const int kMaxNumberOfPlayoutDataChannels = 2;

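// Reads |length| bytes of 16-bit stereo 48 kHz PCM from the test speech file
// into |data|.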
void ReadDataFromSpeechFile(char* data, int length) {
  base::FilePath file;
  CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file));
  file = file.Append(FILE_PATH_LITERAL("media"))
             .Append(FILE_PATH_LITERAL("test"))
             .Append(FILE_PATH_LITERAL("data"))
             .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
  DCHECK(base::PathExists(file));
  int64 data_file_size64 = 0;
  DCHECK(base::GetFileSize(file, &data_file_size64));
  EXPECT_EQ(length, base::ReadFile(file, data, length));
  DCHECK(data_file_size64 > length);
}

}  // namespace

class MediaStreamAudioProcessorTest : public ::testing::Test {
 public:
  MediaStreamAudioProcessorTest()
      : params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                media::CHANNEL_LAYOUT_STEREO, 48000, 16, 512) {
  }

 protected:
  // Helper method to save duplicated code.
  void ProcessDataAndVerifyFormat(MediaStreamAudioProcessor* audio_processor,
                                  int expected_output_sample_rate,
                                  int expected_output_channels,
                                  int expected_output_buffer_size) {
    // Read the audio data from a file.
    const media::AudioParameters& params = audio_processor->InputFormat();
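    // Each packet holds frames_per_buffer() frames of 16-bit (2-byte) samples
    // per channel, matching the 16-bit raw speech file read above.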
    const int packet_size =
        params.frames_per_buffer() * 2 * params.channels();
    const size_t length = packet_size * kNumberOfPacketsForTest;
    scoped_ptr<char[]> capture_data(new char[length]);
    ReadDataFromSpeechFile(capture_data.get(), length);
    const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
    scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
        params.channels(), params.frames_per_buffer());

    // |data_bus_playout| is used if the number of capture channels is larger
    // than the max allowed playout channels. |data_bus_playout_to_use| points
    // to the AudioBus to use, either |data_bus| or |data_bus_playout|.
    scoped_ptr<media::AudioBus> data_bus_playout;
    media::AudioBus* data_bus_playout_to_use = data_bus.get();
    if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
      data_bus_playout =
          media::AudioBus::CreateWrapper(kMaxNumberOfPlayoutDataChannels);
      data_bus_playout->set_frames(params.frames_per_buffer());
      data_bus_playout_to_use = data_bus_playout.get();
    }

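    // Process the capture data packet by packet: push a packet, feed playout
    // data when AEC is active, then drain and verify the processed output.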
    for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
      data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
      audio_processor->PushCaptureData(data_bus.get());

      // |audio_processing_| is NULL when audio processing is turned off in
      // the processor, in which case the data is simply passed through.
      webrtc::AudioProcessing* ap = audio_processor->audio_processing_.get();
#if defined(OS_ANDROID) || defined(OS_IOS)
      const bool is_aec_enabled = ap && ap->echo_control_mobile()->is_enabled();
      // The full AEC should be turned off on mobile; AECM is used instead.
      DCHECK(!ap || !ap->echo_cancellation()->is_enabled());
#else
      const bool is_aec_enabled = ap && ap->echo_cancellation()->is_enabled();
#endif
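      // When echo cancellation is active, feed playout (far-end) data so the
      // canceller has a reverse stream to process; forward at most
      // kMaxNumberOfPlayoutDataChannels channels of the capture data.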
      if (is_aec_enabled) {
        if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
          for (int i = 0; i < kMaxNumberOfPlayoutDataChannels; ++i) {
            data_bus_playout->SetChannelData(
                i, const_cast<float*>(data_bus->channel(i)));
          }
        }
        audio_processor->OnPlayoutData(data_bus_playout_to_use,
                                       params.sample_rate(), 10);
      }

      int16* output = NULL;
      int new_volume = 0;
      while (audio_processor->ProcessAndConsumeData(
          base::TimeDelta::FromMilliseconds(10), 255, false, &new_volume,
          &output)) {
        EXPECT_TRUE(output != NULL);
        EXPECT_EQ(audio_processor->OutputFormat().sample_rate(),
                  expected_output_sample_rate);
        EXPECT_EQ(audio_processor->OutputFormat().channels(),
                  expected_output_channels);
        EXPECT_EQ(audio_processor->OutputFormat().frames_per_buffer(),
                  expected_output_buffer_size);
      }

      data_ptr += params.frames_per_buffer() * params.channels();
    }
  }

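  // Verifies that the default set of webrtc::AudioProcessing components is
  // enabled and configured as expected for the current platform.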
  void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) {
    webrtc::AudioProcessing* audio_processing =
        audio_processor->audio_processing_.get();
#if defined(OS_ANDROID)
    EXPECT_TRUE(audio_processing->echo_control_mobile()->is_enabled());
    EXPECT_TRUE(audio_processing->echo_control_mobile()->routing_mode() ==
        webrtc::EchoControlMobile::kSpeakerphone);
    EXPECT_FALSE(audio_processing->echo_cancellation()->is_enabled());
#elif !defined(OS_IOS)
    EXPECT_TRUE(audio_processing->echo_cancellation()->is_enabled());
    EXPECT_TRUE(audio_processing->echo_cancellation()->suppression_level() ==
        webrtc::EchoCancellation::kHighSuppression);
    EXPECT_TRUE(audio_processing->echo_cancellation()->are_metrics_enabled());
    EXPECT_TRUE(
        audio_processing->echo_cancellation()->is_delay_logging_enabled());
#endif

    EXPECT_TRUE(audio_processing->noise_suppression()->is_enabled());
    EXPECT_TRUE(audio_processing->noise_suppression()->level() ==
        webrtc::NoiseSuppression::kHigh);
    EXPECT_TRUE(audio_processing->high_pass_filter()->is_enabled());
    EXPECT_TRUE(audio_processing->gain_control()->is_enabled());
#if defined(OS_ANDROID) || defined(OS_IOS)
    EXPECT_TRUE(audio_processing->gain_control()->mode() ==
        webrtc::GainControl::kFixedDigital);
    EXPECT_FALSE(audio_processing->voice_detection()->is_enabled());
#else
    EXPECT_TRUE(audio_processing->gain_control()->mode() ==
        webrtc::GainControl::kAdaptiveAnalog);
    EXPECT_TRUE(audio_processing->voice_detection()->is_enabled());
    EXPECT_TRUE(audio_processing->voice_detection()->likelihood() ==
        webrtc::VoiceDetection::kVeryLowLikelihood);
#endif
  }

  media::AudioParameters params_;
};

TEST_F(MediaStreamAudioProcessorTest, WithoutAudioProcessing) {
  // Set up the audio processor with the kDisableAudioTrackProcessing flag on.
  CommandLine::ForCurrentProcess()->AppendSwitch(
      switches::kDisableAudioTrackProcessing);
  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);

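  // Without processing, the output should match the capture format; the
  // processor delivers 10 ms buffers, i.e. sample_rate / 100 frames.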
  ProcessDataAndVerifyFormat(audio_processor.get(),
                             params_.sample_rate(),
                             params_.channels(),
                             params_.sample_rate() / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_TRUE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);
  VerifyDefaultComponents(audio_processor.get());

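  // With processing enabled the output is resampled to the audio processing
  // rate, mixed down to mono and delivered in 10 ms buffers.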
  ProcessDataAndVerifyFormat(audio_processor.get(),
                             kAudioProcessingSampleRate,
                             kAudioProcessingNumberOfChannel,
                             kAudioProcessingSampleRate / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

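// Tab capture and system (loopback) capture sources should not enable audio
// processing.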
TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) {
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source.
  MockMediaConstraintFactory tab_constraint_factory;
  const std::string tab_string = kMediaStreamSourceTab;
  tab_constraint_factory.AddMandatory(kMediaStreamSource,
                                      tab_string);
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          tab_constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);

  ProcessDataAndVerifyFormat(audio_processor.get(),
                             params_.sample_rate(),
                             params_.channels(),
                             params_.sample_rate() / 100);

  // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem
  // source.
  MockMediaConstraintFactory system_constraint_factory;
  const std::string system_string = kMediaStreamSourceSystem;
  system_constraint_factory.AddMandatory(kMediaStreamSource,
                                         system_string);
  audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
      system_constraint_factory.CreateWebMediaConstraints(), 0,
      webrtc_audio_device.get());
  EXPECT_FALSE(audio_processor->has_audio_processing());

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

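// Disabling all of the default audio constraints should leave the processor
// in pass-through mode.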
TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) {
  // Turn off the default constraints and pass them to
  // MediaStreamAudioProcessor.
  MockMediaConstraintFactory constraint_factory;
  constraint_factory.DisableDefaultAudioConstraints();
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);

  ProcessDataAndVerifyFormat(audio_processor.get(),
                             params_.sample_rate(),
                             params_.channels(),
                             params_.sample_rate() / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, VerifyConstraints) {
  static const char* kDefaultAudioConstraints[] = {
    MediaAudioConstraints::kEchoCancellation,
    MediaAudioConstraints::kGoogAudioMirroring,
    MediaAudioConstraints::kGoogAutoGainControl,
    MediaAudioConstraints::kGoogEchoCancellation,
    MediaAudioConstraints::kGoogExperimentalEchoCancellation,
    MediaAudioConstraints::kGoogExperimentalAutoGainControl,
    MediaAudioConstraints::kGoogExperimentalNoiseSuppression,
    MediaAudioConstraints::kGoogHighpassFilter,
    MediaAudioConstraints::kGoogNoiseSuppression,
    MediaAudioConstraints::kGoogTypingNoiseDetection
  };

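  // Setting any of these constraints to false, whether as a mandatory or an
  // optional constraint, should be reflected by
  // MediaAudioConstraints::GetProperty().
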
  // Verify mandatory constraints.
  for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
    MockMediaConstraintFactory constraint_factory;
    constraint_factory.AddMandatory(kDefaultAudioConstraints[i], false);
    blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints(constraints, 0);
    EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
  }

  // Verify optional constraints.
  for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
    MockMediaConstraintFactory constraint_factory;
    constraint_factory.AddOptional(kDefaultAudioConstraints[i], false);
    blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints(constraints, 0);
    EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
  }

  {
    // Verify echo cancellation is off when the platform AEC effect is on.
    MockMediaConstraintFactory constraint_factory;
    MediaAudioConstraints audio_constraints(
        constraint_factory.CreateWebMediaConstraints(),
        media::AudioParameters::ECHO_CANCELLER);
    EXPECT_FALSE(audio_constraints.GetEchoCancellationProperty());
  }

  {
    // Verify that |kEchoCancellation| overrides |kGoogEchoCancellation|.
    MockMediaConstraintFactory constraint_factory_1;
    constraint_factory_1.AddOptional(MediaAudioConstraints::kEchoCancellation,
                                     true);
    constraint_factory_1.AddOptional(
        MediaAudioConstraints::kGoogEchoCancellation, false);
    blink::WebMediaConstraints constraints_1 =
        constraint_factory_1.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints_1(constraints_1, 0);
    EXPECT_TRUE(audio_constraints_1.GetEchoCancellationProperty());

    MockMediaConstraintFactory constraint_factory_2;
    constraint_factory_2.AddOptional(MediaAudioConstraints::kEchoCancellation,
                                     false);
    constraint_factory_2.AddOptional(
        MediaAudioConstraints::kGoogEchoCancellation, true);
    blink::WebMediaConstraints constraints_2 =
        constraint_factory_2.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints_2(constraints_2, 0);
    EXPECT_FALSE(audio_constraints_2.GetEchoCancellationProperty());
  }

  {
    // When |kEchoCancellation| is explicitly set to false, the default values
    // for all the constraints except |kMediaStreamAudioDucking| are false.
    MockMediaConstraintFactory constraint_factory;
    constraint_factory.AddOptional(MediaAudioConstraints::kEchoCancellation,
                                   false);
    blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints(constraints, 0);
    for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
      EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
    }
    EXPECT_FALSE(audio_constraints.NeedsAudioProcessing());
#if defined(OS_WIN)
    EXPECT_TRUE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
#else
    EXPECT_FALSE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
#endif
  }
}

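// An unrecognized mandatory constraint should make the constraint set invalid.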
TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) {
  MockMediaConstraintFactory constraint_factory;
  const std::string dummy_constraint = "dummy";
  constraint_factory.AddMandatory(dummy_constraint, true);
  MediaAudioConstraints audio_constraints(
      constraint_factory.CreateWebMediaConstraints(), 0);
  EXPECT_FALSE(audio_constraints.IsValid());
}

TEST_F(MediaStreamAudioProcessorTest, TestAllSampleRates) {
  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_TRUE(audio_processor->has_audio_processing());

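  // Exercise each supported capture sample rate. The buffer size is 10 ms
  // worth of frames, capped at 128 frames for the higher rates.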
  static const int kSupportedSampleRates[] =
      { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 };
  for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) {
    int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ?
        kSupportedSampleRates[i] / 100 : 128;
    media::AudioParameters params(
        media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
        media::CHANNEL_LAYOUT_STEREO, kSupportedSampleRates[i], 16,
        buffer_size);
    audio_processor->OnCaptureFormatChanged(params);
    VerifyDefaultComponents(audio_processor.get());

    ProcessDataAndVerifyFormat(audio_processor.get(),
                               kAudioProcessingSampleRate,
                               kAudioProcessingNumberOfChannel,
                               kAudioProcessingSampleRate / 100);
  }

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device|
  // outlives |audio_processor|.
  audio_processor = NULL;
}

// Test that if we have an AEC dump message filter created, we are getting it
// correctly in MSAP. Any IPC messages will be deleted since no sender in the
// filter will be created.
TEST_F(MediaStreamAudioProcessorTest, GetAecDumpMessageFilter) {
  base::MessageLoopForUI message_loop;
  scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_(
      new AecDumpMessageFilter(message_loop.message_loop_proxy(),
                               message_loop.message_loop_proxy()));

  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));

  EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get());

  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) {
  // Set up the constraints to turn off the audio processing and turn on
  // stereo channel mirroring.
  MockMediaConstraintFactory constraint_factory;
  constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation,
                                  false);
  constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring,
                                  true);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  const media::AudioParameters source_params(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480);
  audio_processor->OnCaptureFormatChanged(source_params);
  EXPECT_EQ(audio_processor->OutputFormat().channels(), 2);

  // Construct left and right channels and give their first samples different
  // values: the left channel gets an impulse, the right channel stays zero.
  const int size = media::AudioBus::CalculateMemorySize(source_params);
  scoped_ptr<float, base::AlignedFreeDeleter> left_channel(
      static_cast<float*>(base::AlignedAlloc(size, 32)));
  scoped_ptr<float, base::AlignedFreeDeleter> right_channel(
      static_cast<float*>(base::AlignedAlloc(size, 32)));
  scoped_ptr<media::AudioBus> wrapper = media::AudioBus::CreateWrapper(
      source_params.channels());
  wrapper->set_frames(source_params.frames_per_buffer());
  wrapper->SetChannelData(0, left_channel.get());
  wrapper->SetChannelData(1, right_channel.get());
  wrapper->Zero();
  float* left_channel_ptr = left_channel.get();
  left_channel_ptr[0] = 1.0f;

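  // |kGoogAudioMirroring| is enabled above, so the processor is expected to
  // swap the channels: the impulse written to channel 0 should come out on
  // channel 1, which is what the expectations below check.
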
  // An audio bus used for verifying the output data values.
  scoped_ptr<media::AudioBus> output_bus = media::AudioBus::Create(
      audio_processor->OutputFormat());

  // Run the test consecutively to make sure the stereo channels are not
  // flipped back and forth.
  static const int kNumberOfPacketsForTest = 100;
  for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
    audio_processor->PushCaptureData(wrapper.get());

    int16* output = NULL;
    int new_volume = 0;
    EXPECT_TRUE(audio_processor->ProcessAndConsumeData(
        base::TimeDelta::FromMilliseconds(0), 0, false, &new_volume, &output));
    output_bus->FromInterleaved(output, output_bus->frames(), 2);
    EXPECT_TRUE(output != NULL);
    EXPECT_EQ(output_bus->channel(0)[0], 0);
    EXPECT_NE(output_bus->channel(1)[0], 0);
  }

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, TestWithKeyboardMicChannel) {
  MockMediaConstraintFactory constraint_factory;
  constraint_factory.AddMandatory(
      MediaAudioConstraints::kGoogExperimentalNoiseSuppression, true);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_TRUE(audio_processor->has_audio_processing());

  media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                                media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC,
                                48000, 16, 512);
  audio_processor->OnCaptureFormatChanged(params);

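  // Even with the keyboard mic channel in the capture layout, the processed
  // output should be the standard mono stream at the audio processing rate.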
  ProcessDataAndVerifyFormat(audio_processor.get(),
                             kAudioProcessingSampleRate,
                             kAudioProcessingNumberOfChannel,
                             kAudioProcessingSampleRate / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest,
       TestWithKeyboardMicChannelWithoutProcessing) {
  // Set up the audio processor with the kDisableAudioTrackProcessing flag on.
  CommandLine::ForCurrentProcess()->AppendSwitch(
      switches::kDisableAudioTrackProcessing);
  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());

  media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                                media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC,
                                48000, 16, 512);
  audio_processor->OnCaptureFormatChanged(params);

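  // Even without processing, the keyboard mic channel should be stripped from
  // the output: expect plain stereo at the capture sample rate.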
  ProcessDataAndVerifyFormat(
      audio_processor.get(),
      params.sample_rate(),
      media::ChannelLayoutToChannelCount(media::CHANNEL_LAYOUT_STEREO),
      params.sample_rate() / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

}  // namespace content