content/renderer/media/media_stream_audio_processor_unittest.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/command_line.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
#include "base/path_service.h"
#include "base/time/time.h"
#include "content/public/common/content_switches.h"
#include "content/public/common/media_stream_request.h"
#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
#include "content/renderer/media/mock_media_constraint_factory.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"

using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::Return;

namespace content {

namespace {

#if defined(ANDROID)
const int kAudioProcessingSampleRate = 16000;
#else
const int kAudioProcessingSampleRate = 32000;
#endif
const int kAudioProcessingNumberOfChannel = 1;

// The number of packets used for testing.
const int kNumberOfPacketsForTest = 100;
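
// Reads |length| bytes of 16-bit stereo 48 kHz PCM speech data from
// media/test/data/speech_16b_stereo_48kHz.raw into |data|. The file is
// expected to be larger than the amount requested.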
void ReadDataFromSpeechFile(char* data, int length) {
  base::FilePath file;
  CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file));
  file = file.Append(FILE_PATH_LITERAL("media"))
             .Append(FILE_PATH_LITERAL("test"))
             .Append(FILE_PATH_LITERAL("data"))
             .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
  DCHECK(base::PathExists(file));
  int64 data_file_size64 = 0;
  DCHECK(base::GetFileSize(file, &data_file_size64));
  EXPECT_EQ(length, base::ReadFile(file, data, length));
  DCHECK(data_file_size64 > length);
}

}  // namespace

class MediaStreamAudioProcessorTest : public ::testing::Test {
 public:
  MediaStreamAudioProcessorTest()
      : params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                media::CHANNEL_LAYOUT_STEREO, 48000, 16, 512) {
  }

 protected:
  // Helper method to save duplicated code.
  void ProcessDataAndVerifyFormat(MediaStreamAudioProcessor* audio_processor,
                                  int expected_output_sample_rate,
                                  int expected_output_channels,
                                  int expected_output_buffer_size) {
    // Read the audio data from a file.
    const media::AudioParameters& params = audio_processor->InputFormat();
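    // The speech file contains 16-bit samples, so each frame occupies two
    // bytes per channel.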
    const int packet_size =
        params.frames_per_buffer() * 2 * params.channels();
    const size_t length = packet_size * kNumberOfPacketsForTest;
    scoped_ptr<char[]> capture_data(new char[length]);
    ReadDataFromSpeechFile(capture_data.get(), length);
    const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
    scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
        params.channels(), params.frames_per_buffer());
    for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
      data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
      audio_processor->PushCaptureData(data_bus.get());

      // |audio_processor| does nothing when audio processing is turned off in
      // the processor.
      webrtc::AudioProcessing* ap = audio_processor->audio_processing_.get();
#if defined(OS_ANDROID) || defined(OS_IOS)
      const bool is_aec_enabled = ap && ap->echo_control_mobile()->is_enabled();
      // AEC should be turned off for mobiles.
      DCHECK(!ap || !ap->echo_cancellation()->is_enabled());
#else
      const bool is_aec_enabled = ap && ap->echo_cancellation()->is_enabled();
#endif
      if (is_aec_enabled) {
        audio_processor->OnPlayoutData(data_bus.get(), params.sample_rate(),
                                       10);
      }

      int16* output = NULL;
      int new_volume = 0;
      while (audio_processor->ProcessAndConsumeData(
          base::TimeDelta::FromMilliseconds(10), 255, false, &new_volume,
          &output)) {
        EXPECT_TRUE(output != NULL);
        EXPECT_EQ(audio_processor->OutputFormat().sample_rate(),
                  expected_output_sample_rate);
        EXPECT_EQ(audio_processor->OutputFormat().channels(),
                  expected_output_channels);
        EXPECT_EQ(audio_processor->OutputFormat().frames_per_buffer(),
                  expected_output_buffer_size);
      }

      data_ptr += params.frames_per_buffer() * params.channels();
    }
  }
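
  // Verifies that the WebRTC AudioProcessing components are configured with
  // the default settings that MediaStreamAudioProcessor enables on this
  // platform.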
  void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) {
    webrtc::AudioProcessing* audio_processing =
        audio_processor->audio_processing_.get();
#if defined(OS_ANDROID)
    EXPECT_TRUE(audio_processing->echo_control_mobile()->is_enabled());
    EXPECT_TRUE(audio_processing->echo_control_mobile()->routing_mode() ==
                webrtc::EchoControlMobile::kSpeakerphone);
    EXPECT_FALSE(audio_processing->echo_cancellation()->is_enabled());
#elif !defined(OS_IOS)
    EXPECT_TRUE(audio_processing->echo_cancellation()->is_enabled());
    EXPECT_TRUE(audio_processing->echo_cancellation()->suppression_level() ==
                webrtc::EchoCancellation::kHighSuppression);
    EXPECT_TRUE(audio_processing->echo_cancellation()->are_metrics_enabled());
    EXPECT_TRUE(
        audio_processing->echo_cancellation()->is_delay_logging_enabled());
#endif

    EXPECT_TRUE(audio_processing->noise_suppression()->is_enabled());
    EXPECT_TRUE(audio_processing->noise_suppression()->level() ==
                webrtc::NoiseSuppression::kHigh);
    EXPECT_TRUE(audio_processing->high_pass_filter()->is_enabled());
    EXPECT_TRUE(audio_processing->gain_control()->is_enabled());
#if defined(OS_ANDROID) || defined(OS_IOS)
    EXPECT_TRUE(audio_processing->gain_control()->mode() ==
                webrtc::GainControl::kFixedDigital);
    EXPECT_FALSE(audio_processing->voice_detection()->is_enabled());
#else
    EXPECT_TRUE(audio_processing->gain_control()->mode() ==
                webrtc::GainControl::kAdaptiveAnalog);
    EXPECT_TRUE(audio_processing->voice_detection()->is_enabled());
    EXPECT_TRUE(audio_processing->voice_detection()->likelihood() ==
                webrtc::VoiceDetection::kVeryLowLikelihood);
#endif
  }
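
  // The capture format used by the tests: 48 kHz, 16-bit stereo with
  // 512-frame buffers.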
  media::AudioParameters params_;
};

TEST_F(MediaStreamAudioProcessorTest, WithoutAudioProcessing) {
  // Set up the audio processor with the disable flag on.
  CommandLine::ForCurrentProcess()->AppendSwitch(
      switches::kDisableAudioTrackProcessing);
  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);
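
  // With processing disabled the output should keep the source sample rate
  // and channel count, delivered in 10 ms buffers (sample rate / 100 frames).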
  ProcessDataAndVerifyFormat(audio_processor.get(),
                             params_.sample_rate(),
                             params_.channels(),
                             params_.sample_rate() / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_TRUE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);
  VerifyDefaultComponents(audio_processor.get());

  ProcessDataAndVerifyFormat(audio_processor.get(),
                             kAudioProcessingSampleRate,
                             kAudioProcessingNumberOfChannel,
                             kAudioProcessingSampleRate / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) {
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  // Create a MediaStreamAudioProcessor instance for a kMediaStreamSourceTab
  // source.
  MockMediaConstraintFactory tab_constraint_factory;
  const std::string tab_string = kMediaStreamSourceTab;
  tab_constraint_factory.AddMandatory(kMediaStreamSource,
                                      tab_string);
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          tab_constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);

  ProcessDataAndVerifyFormat(audio_processor.get(),
                             params_.sample_rate(),
                             params_.channels(),
                             params_.sample_rate() / 100);

  // Create a MediaStreamAudioProcessor instance for a kMediaStreamSourceSystem
  // source.
  MockMediaConstraintFactory system_constraint_factory;
  const std::string system_string = kMediaStreamSourceSystem;
  system_constraint_factory.AddMandatory(kMediaStreamSource,
                                         system_string);
  audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
      system_constraint_factory.CreateWebMediaConstraints(), 0,
      webrtc_audio_device.get());
  EXPECT_FALSE(audio_processor->has_audio_processing());

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) {
  // Turn off the default constraints and pass them to
  // MediaStreamAudioProcessor.
  MockMediaConstraintFactory constraint_factory;
  constraint_factory.DisableDefaultAudioConstraints();
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);

  ProcessDataAndVerifyFormat(audio_processor.get(),
                             params_.sample_rate(),
                             params_.channels(),
                             params_.sample_rate() / 100);
  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}
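
// Verifies how MediaAudioConstraints interprets the audio constraints: each
// default constraint can be disabled as either a mandatory or an optional
// constraint, the platform AEC effect turns echo cancellation off, and
// |kEchoCancellation| takes precedence over |kGoogEchoCancellation|.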
TEST_F(MediaStreamAudioProcessorTest, VerifyConstraints) {
  static const char* kDefaultAudioConstraints[] = {
    MediaAudioConstraints::kEchoCancellation,
    MediaAudioConstraints::kGoogAudioMirroring,
    MediaAudioConstraints::kGoogAutoGainControl,
    MediaAudioConstraints::kGoogEchoCancellation,
    MediaAudioConstraints::kGoogExperimentalEchoCancellation,
    MediaAudioConstraints::kGoogExperimentalAutoGainControl,
    MediaAudioConstraints::kGoogExperimentalNoiseSuppression,
    MediaAudioConstraints::kGoogHighpassFilter,
    MediaAudioConstraints::kGoogNoiseSuppression,
    MediaAudioConstraints::kGoogTypingNoiseDetection
  };

  // Verify mandatory constraints.
  for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
    MockMediaConstraintFactory constraint_factory;
    constraint_factory.AddMandatory(kDefaultAudioConstraints[i], false);
    blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints(constraints, 0);
    EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
  }

  // Verify optional constraints.
  for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
    MockMediaConstraintFactory constraint_factory;
    constraint_factory.AddOptional(kDefaultAudioConstraints[i], false);
    blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints(constraints, 0);
    EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
  }

  {
    // Verify echo cancellation is off when the platform AEC effect is on.
    MockMediaConstraintFactory constraint_factory;
    MediaAudioConstraints audio_constraints(
        constraint_factory.CreateWebMediaConstraints(),
        media::AudioParameters::ECHO_CANCELLER);
    EXPECT_FALSE(audio_constraints.GetEchoCancellationProperty());
  }

  {
    // Verify |kEchoCancellation| overwrites |kGoogEchoCancellation|.
    MockMediaConstraintFactory constraint_factory_1;
    constraint_factory_1.AddOptional(MediaAudioConstraints::kEchoCancellation,
                                     true);
    constraint_factory_1.AddOptional(
        MediaAudioConstraints::kGoogEchoCancellation, false);
    blink::WebMediaConstraints constraints_1 =
        constraint_factory_1.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints_1(constraints_1, 0);
    EXPECT_TRUE(audio_constraints_1.GetEchoCancellationProperty());

    MockMediaConstraintFactory constraint_factory_2;
    constraint_factory_2.AddOptional(MediaAudioConstraints::kEchoCancellation,
                                     false);
    constraint_factory_2.AddOptional(
        MediaAudioConstraints::kGoogEchoCancellation, true);
    blink::WebMediaConstraints constraints_2 =
        constraint_factory_2.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints_2(constraints_2, 0);
    EXPECT_FALSE(audio_constraints_2.GetEchoCancellationProperty());
  }

  {
    // When |kEchoCancellation| is explicitly set to false, the default values
    // for all the constraints except |kMediaStreamAudioDucking| are false.
    MockMediaConstraintFactory constraint_factory;
    constraint_factory.AddOptional(MediaAudioConstraints::kEchoCancellation,
                                   false);
    blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    MediaAudioConstraints audio_constraints(constraints, 0);
    for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
      EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
    }
    EXPECT_FALSE(audio_constraints.NeedsAudioProcessing());
#if defined(OS_WIN)
    EXPECT_TRUE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
#else
    EXPECT_FALSE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
#endif
  }
}

TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) {
  MockMediaConstraintFactory constraint_factory;
  const std::string dummy_constraint = "dummy";
  constraint_factory.AddMandatory(dummy_constraint, true);
  MediaAudioConstraints audio_constraints(
      constraint_factory.CreateWebMediaConstraints(), 0);
  EXPECT_FALSE(audio_constraints.IsValid());
}

TEST_F(MediaStreamAudioProcessorTest, TestAllSampleRates) {
  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_TRUE(audio_processor->has_audio_processing());

  static const int kSupportedSampleRates[] =
      { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 };
  for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) {
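    // Use a buffer size of 10 ms worth of frames, capped at 128 frames per
    // buffer.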
    int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ?
        kSupportedSampleRates[i] / 100 : 128;
    media::AudioParameters params(
        media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
        media::CHANNEL_LAYOUT_STEREO, kSupportedSampleRates[i], 16,
        buffer_size);
    audio_processor->OnCaptureFormatChanged(params);
    VerifyDefaultComponents(audio_processor.get());

    ProcessDataAndVerifyFormat(audio_processor.get(),
                               kAudioProcessingSampleRate,
                               kAudioProcessingNumberOfChannel,
                               kAudioProcessingSampleRate / 100);
  }

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device|
  // outlives |audio_processor|.
  audio_processor = NULL;
}

// Test that if an AecDumpMessageFilter is created, MediaStreamAudioProcessor
// picks it up correctly. Any IPC messages will be deleted since no sender is
// created for the filter.
TEST_F(MediaStreamAudioProcessorTest, GetAecDumpMessageFilter) {
  base::MessageLoopForUI message_loop;
  scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_(
      new AecDumpMessageFilter(message_loop.message_loop_proxy(),
                               message_loop.message_loop_proxy()));

  MockMediaConstraintFactory constraint_factory;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));

  EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get());

  audio_processor = NULL;
}

TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) {
  // Set up the correct constraints to turn off the audio processing and turn
  // on stereo channel mirroring.
  MockMediaConstraintFactory constraint_factory;
  constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation,
                                  false);
  constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring,
                                  true);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraint_factory.CreateWebMediaConstraints(), 0,
          webrtc_audio_device.get()));
  EXPECT_FALSE(audio_processor->has_audio_processing());
  const media::AudioParameters source_params(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480);
  audio_processor->OnCaptureFormatChanged(source_params);
  EXPECT_EQ(audio_processor->OutputFormat().channels(), 2);

  // Construct left and right channels, and assign a different value to the
  // first sample of the left channel than to the right channel.
  const int size = media::AudioBus::CalculateMemorySize(source_params);
  scoped_ptr<float, base::AlignedFreeDeleter> left_channel(
      static_cast<float*>(base::AlignedAlloc(size, 32)));
  scoped_ptr<float, base::AlignedFreeDeleter> right_channel(
      static_cast<float*>(base::AlignedAlloc(size, 32)));
  scoped_ptr<media::AudioBus> wrapper = media::AudioBus::CreateWrapper(
      source_params.channels());
  wrapper->set_frames(source_params.frames_per_buffer());
  wrapper->SetChannelData(0, left_channel.get());
  wrapper->SetChannelData(1, right_channel.get());
  wrapper->Zero();
  float* left_channel_ptr = left_channel.get();
  left_channel_ptr[0] = 1.0f;

  // An audio bus used for verifying the output data values.
  scoped_ptr<media::AudioBus> output_bus = media::AudioBus::Create(
      audio_processor->OutputFormat());

  // Run the test consecutively to make sure the stereo channels are not
  // flipped back and forth.
  static const int kNumberOfPacketsForTest = 100;
  for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
    audio_processor->PushCaptureData(wrapper.get());

    int16* output = NULL;
    int new_volume = 0;
    EXPECT_TRUE(audio_processor->ProcessAndConsumeData(
        base::TimeDelta::FromMilliseconds(0), 0, false, &new_volume, &output));
    output_bus->FromInterleaved(output, output_bus->frames(), 2);
    EXPECT_TRUE(output != NULL);
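    // With |kGoogAudioMirroring| enabled the non-zero sample written to the
    // left channel is expected to show up in the right channel, while the
    // left channel of the output stays zero.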
    EXPECT_EQ(output_bus->channel(0)[0], 0);
    EXPECT_NE(output_bus->channel(1)[0], 0);
  }

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}

}  // namespace content