content/browser/speech/speech_recognition_browsertest.cc

// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <math.h>

#include <list>

#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/run_loop.h"
#include "base/strings/utf_string_conversions.h"
#include "content/browser/speech/google_streaming_remote_engine.h"
#include "content/browser/speech/speech_recognition_manager_impl.h"
#include "content/browser/speech/speech_recognizer_impl.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/notification_types.h"
#include "content/public/browser/web_contents.h"
#include "content/public/test/browser_test_utils.h"
#include "content/public/test/test_utils.h"
#include "content/shell/browser/shell.h"
#include "content/test/content_browser_test.h"
#include "content/test/content_browser_test_utils.h"
#include "content/test/mock_google_streaming_server.h"
#include "media/audio/mock_audio_manager.h"
#include "media/audio/test_audio_input_controller_factory.h"
#include "testing/gtest/include/gtest/gtest.h"

using base::RunLoop;

namespace content {

class SpeechRecognitionBrowserTest :
    public ContentBrowserTest,
    public MockGoogleStreamingServer::Delegate,
    public media::TestAudioInputControllerDelegate {
 public:
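  // Progress of the mock streaming server as observed by this test. A
  // successful one-shot recognition is expected to advance through
  // kIdle -> kTestAudioControllerOpened -> kClientConnected ->
  // kClientAudioUpload -> kTestAudioControllerClosed ->
  // kClientAudioUploadComplete -> kClientDisconnected, as enforced by the
  // ASSERT_EQ checks in the delegate callbacks below.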
  enum StreamingServerState {
    kIdle,
    kTestAudioControllerOpened,
    kClientConnected,
    kClientAudioUpload,
    kClientAudioUploadComplete,
    kTestAudioControllerClosed,
    kClientDisconnected
  };

  // MockGoogleStreamingServer::Delegate methods.
  virtual void OnClientConnected() OVERRIDE {
    ASSERT_EQ(kTestAudioControllerOpened, streaming_server_state_);
    streaming_server_state_ = kClientConnected;
  }

  virtual void OnClientAudioUpload() OVERRIDE {
    if (streaming_server_state_ == kClientConnected)
      streaming_server_state_ = kClientAudioUpload;
  }

  virtual void OnClientAudioUploadComplete() OVERRIDE {
    ASSERT_EQ(kTestAudioControllerClosed, streaming_server_state_);
    streaming_server_state_ = kClientAudioUploadComplete;
  }

  virtual void OnClientDisconnected() OVERRIDE {
    ASSERT_EQ(kClientAudioUploadComplete, streaming_server_state_);
    streaming_server_state_ = kClientDisconnected;
  }

  // media::TestAudioInputControllerDelegate methods.
  virtual void TestAudioControllerOpened(
      media::TestAudioInputController* controller) OVERRIDE {
    ASSERT_EQ(kIdle, streaming_server_state_);
    streaming_server_state_ = kTestAudioControllerOpened;
    const int capture_packet_interval_ms =
        (1000 * controller->audio_parameters().frames_per_buffer()) /
        controller->audio_parameters().sample_rate();
    ASSERT_EQ(GoogleStreamingRemoteEngine::kAudioPacketIntervalMs,
              capture_packet_interval_ms);
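    // Simulate a short utterance: 0.5 s of leading silence, 1 s of non-silent
    // (sine) audio, then 1 s of trailing silence, which should give the
    // recognizer a detectable start and end of speech.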
    FeedAudioController(500 /* ms */, /*noise=*/ false);
    FeedAudioController(1000 /* ms */, /*noise=*/ true);
    FeedAudioController(1000 /* ms */, /*noise=*/ false);
  }

  virtual void TestAudioControllerClosed(
      media::TestAudioInputController* controller) OVERRIDE {
    ASSERT_EQ(kClientAudioUpload, streaming_server_state_);
    streaming_server_state_ = kTestAudioControllerClosed;
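    // Audio capture has finished; have the mock server deliver the canned
    // recognition result.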
    mock_streaming_server_->SimulateResult(GetGoodSpeechResult());
  }

  // Helper methods used by test fixtures.
  GURL GetTestUrlFromFragment(const std::string& fragment) {
    return GURL(GetTestUrl("speech", "web_speech_recognition.html").spec() +
                "#" + fragment);
  }
  std::string GetPageFragment() {
    return shell()->web_contents()->GetURL().ref();
  }

  const StreamingServerState& streaming_server_state() {
    return streaming_server_state_;
  }

 protected:
  // ContentBrowserTest methods.
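  // Install the mock audio input layer and the mock Google streaming server,
  // so the tests below run without a real microphone or network connection.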
  virtual void SetUpInProcessBrowserTestFixture() OVERRIDE {
    test_audio_input_controller_factory_.set_delegate(this);
    media::AudioInputController::set_factory_for_testing(
        &test_audio_input_controller_factory_);
    mock_streaming_server_.reset(new MockGoogleStreamingServer(this));
    streaming_server_state_ = kIdle;
  }

  virtual void SetUpOnMainThread() OVERRIDE {
    ASSERT_TRUE(SpeechRecognitionManagerImpl::GetInstance());
    SpeechRecognizerImpl::SetAudioManagerForTesting(
        new media::MockAudioManager(BrowserThread::GetMessageLoopProxyForThread(
            BrowserThread::IO)));
  }

  virtual void TearDownOnMainThread() OVERRIDE {
    SpeechRecognizerImpl::SetAudioManagerForTesting(NULL);
  }

  virtual void TearDownInProcessBrowserTestFixture() OVERRIDE {
    test_audio_input_controller_factory_.set_delegate(NULL);
    mock_streaming_server_.reset();
  }

 private:
  static void FeedSingleBufferToAudioController(
      scoped_refptr<media::TestAudioInputController> controller,
      size_t buffer_size,
      bool fill_with_noise) {
    DCHECK(controller.get());
    scoped_ptr<uint8[]> audio_buffer(new uint8[buffer_size]);
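    // Fill the buffer either with non-zero samples taken from a sine curve
    // (treated as "noise", i.e. non-silence) or with zeros (silence).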
    if (fill_with_noise) {
      for (size_t i = 0; i < buffer_size; ++i)
        audio_buffer[i] = static_cast<uint8>(127 * sin(i * 3.14F /
            (16 * buffer_size)));
    } else {
      memset(audio_buffer.get(), 0, buffer_size);
    }
    controller->event_handler()->OnData(controller,
                                        audio_buffer.get(),
                                        buffer_size);
  }

  void FeedAudioController(int duration_ms, bool feed_with_noise) {
    media::TestAudioInputController* controller =
        test_audio_input_controller_factory_.controller();
    ASSERT_TRUE(controller);
    const media::AudioParameters& audio_params =
        controller->audio_parameters();
    const size_t buffer_size = audio_params.GetBytesPerBuffer();
    const int ms_per_buffer = audio_params.frames_per_buffer() * 1000 /
                              audio_params.sample_rate();
    // We can only simulate durations that are integer multiples of the
    // buffer duration; see
    // SpeechRecognitionEngine::GetDesiredAudioChunkDurationMs().
    ASSERT_EQ(0, duration_ms % ms_per_buffer);
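
    // E.g., if each capture buffer holds 100 ms of audio (the actual value is
    // dictated by kAudioPacketIntervalMs, as asserted in
    // TestAudioControllerOpened), a 1000 ms feed posts 10 buffers below.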
    const int n_buffers = duration_ms / ms_per_buffer;
    for (int i = 0; i < n_buffers; ++i) {
      base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
          &FeedSingleBufferToAudioController,
          scoped_refptr<media::TestAudioInputController>(controller),
          buffer_size,
          feed_with_noise));
    }
  }

  SpeechRecognitionResult GetGoodSpeechResult() {
    SpeechRecognitionResult result;
    result.hypotheses.push_back(SpeechRecognitionHypothesis(
        base::UTF8ToUTF16("Pictures of the moon"), 1.0F));
    return result;
  }

  StreamingServerState streaming_server_state_;
  scoped_ptr<MockGoogleStreamingServer> mock_streaming_server_;
  media::TestAudioInputControllerFactory test_audio_input_controller_factory_;
};

// Simply loads the test page and checks that it was able to create a Speech
// Recognition object in JavaScript, to make sure the Web Speech API is
// enabled.
IN_PROC_BROWSER_TEST_F(SpeechRecognitionBrowserTest, Precheck) {
  NavigateToURLBlockUntilNavigationsComplete(
      shell(), GetTestUrlFromFragment("precheck"), 2);

  EXPECT_EQ(kIdle, streaming_server_state());
  EXPECT_EQ("success", GetPageFragment());
}
IN_PROC_BROWSER_TEST_F(SpeechRecognitionBrowserTest, OneShotRecognition) {
  NavigateToURLBlockUntilNavigationsComplete(
      shell(), GetTestUrlFromFragment("oneshot"), 2);

  EXPECT_EQ(kClientDisconnected, streaming_server_state());
  EXPECT_EQ("goodresult1", GetPageFragment());
}

}  // namespace content