// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>

#include "base/environment.h"
#include "base/test/test_timeouts.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_audio_renderer.h"
#include "content/renderer/render_thread_impl.h"
#include "content/test/webrtc_audio_device_test.h"
#include "media/audio/audio_manager_base.h"
#include "media/base/audio_hardware_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/webrtc/voice_engine/include/voe_audio_processing.h"
#include "third_party/webrtc/voice_engine/include/voe_base.h"
#include "third_party/webrtc/voice_engine/include/voe_external_media.h"
#include "third_party/webrtc/voice_engine/include/voe_file.h"
#include "third_party/webrtc/voice_engine/include/voe_network.h"

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#endif

using media::AudioParameters;
using testing::_;
using testing::AnyNumber;
using testing::InvokeWithoutArgs;
using testing::Return;
using testing::StrEq;

namespace content {

namespace {

const int kRenderViewId = 1;

scoped_ptr<media::AudioHardwareConfig> CreateRealHardwareConfig(
    media::AudioManager* manager) {
  const AudioParameters output_parameters =
      manager->GetDefaultOutputStreamParameters();
  const AudioParameters input_parameters =
      manager->GetInputStreamParameters(
          media::AudioManagerBase::kDefaultDeviceId);

  return make_scoped_ptr(new media::AudioHardwareConfig(
      input_parameters, output_parameters));
}

// Return true if at least one element in the array matches |value|.
bool FindElementInArray(const int* array, int size, int value) {
  return (std::find(&array[0], &array[0] + size, value) != &array[size]);
}

// This method returns false if a non-supported rate is detected on the
// input or output side.
// TODO(henrika): add support for automatic fallback to Windows Wave audio
// if a non-supported rate is detected. It is probably better to detect
// invalid audio settings by actually trying to open the audio streams instead
// of relying on hard-coded conditions.
bool HardwareSampleRatesAreValid() {
  // These are the currently supported hardware sample rates in both
  // directions. The actual WebRTC client can limit these ranges further
  // depending on platform but this is the maximum range we support today.
  int valid_input_rates[] = {16000, 32000, 44100, 48000, 96000};
  int valid_output_rates[] = {16000, 32000, 44100, 48000, 96000};

  media::AudioHardwareConfig* hardware_config =
      RenderThreadImpl::current()->GetAudioHardwareConfig();

  // Verify the input sample rate.
  int input_sample_rate = hardware_config->GetInputSampleRate();
  if (!FindElementInArray(valid_input_rates, arraysize(valid_input_rates),
                          input_sample_rate)) {
    LOG(WARNING) << "Non-supported input sample rate detected.";
    return false;
  }

  // Given that the input rate was OK, verify the output rate as well.
  int output_sample_rate = hardware_config->GetOutputSampleRate();
  if (!FindElementInArray(valid_output_rates, arraysize(valid_output_rates),
                          output_sample_rate)) {
    LOG(WARNING) << "Non-supported output sample rate detected.";
    return false;
  }

  return true;
}

// Utility method which initializes the audio capturer contained in the
// WebRTC audio device. This method should be used in tests where
// HardwareSampleRatesAreValid() has been called and returned true.
bool InitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
  // Access the capturer owned and created by the audio device.
  WebRtcAudioCapturer* capturer = webrtc_audio_device->capturer();
  if (!capturer)
    return false;

  media::AudioHardwareConfig* hardware_config =
      RenderThreadImpl::current()->GetAudioHardwareConfig();

  // Use the native capture sample rate and channel configuration to get some
  // action in this test.
  int sample_rate = hardware_config->GetInputSampleRate();
  media::ChannelLayout channel_layout =
      hardware_config->GetInputChannelLayout();
  if (!capturer->Initialize(channel_layout, sample_rate, 1))
    return false;

  return true;
}

class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
 public:
  explicit WebRTCMediaProcessImpl(base::WaitableEvent* event)
      : event_(event),
        channel_id_(-1),
        type_(webrtc::kPlaybackPerChannel),
        packet_size_(0),
        sample_rate_(0),
        channels_(0) {
  }
  virtual ~WebRTCMediaProcessImpl() {}

  // TODO(henrika): Refactor in WebRTC and convert to Chrome coding style.
  virtual void Process(const int channel,
                       const webrtc::ProcessingTypes type,
                       WebRtc_Word16 audio_10ms[],
                       const int length,
                       const int sampling_freq,
                       const bool is_stereo) OVERRIDE {
    base::AutoLock auto_lock(lock_);
    channel_id_ = channel;
    type_ = type;
    packet_size_ = length;
    sample_rate_ = sampling_freq;
    channels_ = (is_stereo ? 2 : 1);
    // Signal that a new callback has been received.
    event_->Signal();
  }

  int channel_id() const {
    base::AutoLock auto_lock(lock_);
    return channel_id_;
  }

  webrtc::ProcessingTypes type() const {
    base::AutoLock auto_lock(lock_);
    return type_;
  }

  int packet_size() const {
    base::AutoLock auto_lock(lock_);
    return packet_size_;
  }

  int sample_rate() const {
    base::AutoLock auto_lock(lock_);
    return sample_rate_;
  }

 private:
  base::WaitableEvent* event_;
  int channel_id_;
  webrtc::ProcessingTypes type_;
  int packet_size_;
  int sample_rate_;
  int channels_;
  mutable base::Lock lock_;

  DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
};

}  // namespace

// Trivial test which verifies that one part of the test harness
// (HardwareSampleRatesAreValid()) works as intended for all supported
// hardware input sample rates.
TEST_F(WebRTCAudioDeviceTest, TestValidInputRates) {
  int valid_rates[] = {16000, 32000, 44100, 48000, 96000};

  // Verify that we will approve all rates listed in |valid_rates|.
  for (size_t i = 0; i < arraysize(valid_rates); ++i) {
    EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
                                   valid_rates[i]));
  }

  // Verify that any value outside the valid range results in negative
  // detection.
  int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 192000};
  for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
    EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
                                    invalid_rates[i]));
  }
}

// Trivial test which verifies that one part of the test harness
// (HardwareSampleRatesAreValid()) works as intended for all supported
// hardware output sample rates.
TEST_F(WebRTCAudioDeviceTest, TestValidOutputRates) {
  int valid_rates[] = {44100, 48000, 96000};

  // Verify that we will approve all rates listed in |valid_rates|.
  for (size_t i = 0; i < arraysize(valid_rates); ++i) {
    EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
                                   valid_rates[i]));
  }

  // Verify that any value outside the valid range results in negative
  // detection.
  int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 32000, 192000};
  for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
    EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
                                    invalid_rates[i]));
  }
}

// Basic test that instantiates and initializes an instance of
// WebRtcAudioDeviceImpl.
TEST_F(WebRTCAudioDeviceTest, Construct) {
#if defined(OS_WIN)
  // This test crashes on Win XP bots.
  if (base::win::GetVersion() <= base::win::VERSION_XP)
    return;
#endif

  // The exact sample rate, bit depth and buffer size do not matter for this
  // test; representative low-latency values are used here.
  AudioParameters input_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_MONO,
      48000, 16, 480);

  AudioParameters output_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO,
      48000, 16, 480);

  media::AudioHardwareConfig audio_config(input_params, output_params);
  SetAudioHardwareConfig(&audio_config);

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  // The capturer is not created until after the WebRtcAudioDeviceImpl has
  // been initialized, so this is expected to fail.
  EXPECT_FALSE(InitializeCapturer(webrtc_audio_device.get()));

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  int err = base->Init(webrtc_audio_device);
  EXPECT_TRUE(InitializeCapturer(webrtc_audio_device.get()));
  EXPECT_EQ(0, err);

  EXPECT_EQ(0, base->Terminate());
}

// Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output
// with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
// be utilized to implement the actual audio path. The test registers a
// webrtc::VoEExternalMedia implementation to hijack the output audio and
// verify that streaming starts correctly.
// Disabled when running headless since the bots don't have the required
// config.
// Flaky, http://crbug.com/167299.
TEST_F(WebRTCAudioDeviceTest, DISABLED_StartPlayout) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("created"))).Times(1);
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamPlaying(_, 1, true)).Times(1);
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("closed"))).Times(1);
  EXPECT_CALL(media_observer(),
      OnDeleteAudioStream(_, 1)).Times(AnyNumber());

  scoped_refptr<WebRtcAudioRenderer> renderer =
      new WebRtcAudioRenderer(kRenderViewId);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer));

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device);
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();

  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());

  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel, *media_process.get()));

  EXPECT_EQ(0, base->StartPlayout(ch));

  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_TRUE(webrtc_audio_device->Playing());
  EXPECT_FALSE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kPlaybackPerChannel, media_process->type());
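  // The default codec runs at 8 kHz and Process() receives 10 ms frames,
  // i.e. 8000 * 0.01 = 80 samples per channel, which the checks below verify.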
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel));
  EXPECT_EQ(0, base->StopPlayout(ch));

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}

// Verify that a call to webrtc::VoEBase::StartSend() starts audio input
// with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
// be utilized to implement the actual audio path. The test registers a
// webrtc::VoEExternalMedia implementation to hijack the input audio and
// verify that streaming starts correctly. An external transport
// implementation is also required to ensure that "sending" can start without
// actually trying to send encoded packets to the network. Our main interest
// here is to ensure that the audio capturing starts as it should.
// Disabled when running headless since the bots don't have the required
// config.

// TODO(leozwang): Because ExternalMediaProcessing is disabled in webrtc,
// disable this unit test on Android for now.
#if defined(OS_ANDROID)
#define MAYBE_StartRecording DISABLED_StartRecording
#else
#define MAYBE_StartRecording StartRecording
#endif
TEST_F(WebRTCAudioDeviceTest, MAYBE_StartRecording) {
  if (!has_input_devices_ || !has_output_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // TODO(tommi): extend MediaObserver and MockMediaObserver with support
  // for new interfaces, like OnSetAudioStreamRecording(). When done, add
  // EXPECT_CALL() macros here.
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device);
  ASSERT_EQ(0, err);

  EXPECT_TRUE(InitializeCapturer(webrtc_audio_device.get()));
  webrtc_audio_device->capturer()->Start();

  int ch = base->CreateChannel();

  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());

  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel, *media_process.get()));

  // We must add an external transport implementation to be able to start
  // recording without actually sending encoded packets to the network. All
  // we want to do here is to verify that audio capturing starts as it should.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartSend(ch));

  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_FALSE(webrtc_audio_device->Playing());
  EXPECT_TRUE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kRecordingPerChannel, media_process->type());
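  // As on the playout side, 10 ms frames at 8 kHz correspond to 80 samples
  // per channel per callback.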
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel));
  EXPECT_EQ(0, base->StopSend(ch));

  webrtc_audio_device->capturer()->Stop();
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}

// Uses WebRtcAudioDeviceImpl to play out a local audio file.
// Disabled when running headless since the bots don't have the required
// config.
// Flaky, http://crbug.com/167298.
TEST_F(WebRTCAudioDeviceTest, DISABLED_PlayLocalFile) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

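  // The test clip is mono speech/music stored as raw PCM sampled at 16 kHz,
  // matching webrtc::kFileFormatPcm16kHzFile used below.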
  std::string file_path(
      GetTestDataPath(FILE_PATH_LITERAL("speechmusic_mono_16kHz.pcm")));

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("created"))).Times(1);
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamPlaying(_, 1, true)).Times(1);
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("closed"))).Times(1);
  EXPECT_CALL(media_observer(),
      OnDeleteAudioStream(_, 1)).Times(AnyNumber());

  scoped_refptr<WebRtcAudioRenderer> renderer =
      new WebRtcAudioRenderer(kRenderViewId);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer));

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device);
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_EQ(0, base->StartPlayout(ch));

  ScopedWebRTCPtr<webrtc::VoEFile> file(engine.get());
  ASSERT_TRUE(file.valid());
  int duration = 0;
  EXPECT_EQ(0, file->GetFileDuration(file_path.c_str(), duration,
                                     webrtc::kFileFormatPcm16kHzFile));
  EXPECT_NE(0, duration);

  EXPECT_EQ(0, file->StartPlayingFileLocally(ch, file_path.c_str(), false,
                                             webrtc::kFileFormatPcm16kHzFile));

  // Play six seconds worth of audio and then quit.
  message_loop_.PostDelayedTask(FROM_HERE,
                                MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(6));
  message_loop_.Run();

  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}

// Uses WebRtcAudioDeviceImpl to play out recorded audio in loopback.
// An external transport implementation is utilized to feed back RTP packets
// which are recorded, encoded, packetized into RTP packets and finally
// "transmitted". The RTP packets are then fed back into the VoiceEngine
// where they are decoded and played out on the default audio output device.
// Disabled when running headless since the bots don't have the required
// config.
// TODO(henrika): improve quality by using a wideband codec, enabling noise
// suppression etc.

// FullDuplexAudioWithAGC is flaky on Android, disable it for now.
#if defined(OS_ANDROID)
#define MAYBE_FullDuplexAudioWithAGC DISABLED_FullDuplexAudioWithAGC
#else
#define MAYBE_FullDuplexAudioWithAGC FullDuplexAudioWithAGC
#endif
TEST_F(WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
  if (!has_output_devices_ || !has_input_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("created")));
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamPlaying(_, 1, true));
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("closed")));
  EXPECT_CALL(media_observer(),
      OnDeleteAudioStream(_, 1)).Times(AnyNumber());

  scoped_refptr<WebRtcAudioRenderer> renderer =
      new WebRtcAudioRenderer(kRenderViewId);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer));

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device);
  ASSERT_EQ(0, err);

  EXPECT_TRUE(InitializeCapturer(webrtc_audio_device.get()));
  webrtc_audio_device->capturer()->Start();

  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  ASSERT_TRUE(audio_processing.valid());
#if defined(OS_ANDROID)
  // On Android, AGC is off by default.
  bool enabled = true;  // Initialized to the opposite of the expected value.
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_FALSE(enabled);
#else
  bool enabled = false;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_TRUE(enabled);
  EXPECT_EQ(agc_mode, webrtc::kAgcAdaptiveAnalog);
#endif

  int ch = base->CreateChannel();
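  // The external transport loops the "transmitted" RTP packets straight back
  // into the channel, so the recorded audio is decoded and played out locally.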
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  ASSERT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(ch));
  EXPECT_EQ(0, base->StartSend(ch));

  LOG(INFO) << ">> You should now be able to hear yourself in loopback...";
  message_loop_.PostDelayedTask(FROM_HERE,
                                MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));
  message_loop_.Run();

  webrtc_audio_device->capturer()->Stop();

  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}

}  // namespace content