content/renderer/speech_recognition_dispatcher.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/speech_recognition_dispatcher.h"

#include "base/basictypes.h"
#include "base/utf_string_conversions.h"
#include "content/common/speech_recognition_messages.h"
#include "content/renderer/render_view_impl.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebString.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechGrammar.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionParams.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionResult.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizerClient.h"

using WebKit::WebVector;
using WebKit::WebString;
using WebKit::WebSpeechGrammar;
using WebKit::WebSpeechRecognitionHandle;
using WebKit::WebSpeechRecognitionResult;
using WebKit::WebSpeechRecognitionParams;
using WebKit::WebSpeechRecognizerClient;

namespace content {

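// SpeechRecognitionDispatcher is the renderer-side endpoint for Web Speech
// API recognition: it forwards WebKit start/stop/abort requests to the
// browser process over IPC and routes the browser's notifications back to
// the WebSpeechRecognizerClient, keyed by a per-session request id.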
SpeechRecognitionDispatcher::SpeechRecognitionDispatcher(
    RenderViewImpl* render_view)
    : RenderViewObserver(render_view),
      recognizer_client_(NULL),
      next_id_(1) {
}

SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {
}

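// Dispatches speech recognition IPC messages from the browser process to the
// per-event handlers below; anything else is reported as unhandled so other
// observers can process it.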
bool SpeechRecognitionDispatcher::OnMessageReceived(
    const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved,
                        OnResultsRetrieved)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

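// Called from WebKit to begin a recognition session: packages the grammars,
// language, and result options into a StartRequest and sends it to the
// browser process, tagged with the request id mapped to |handle|.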
void SpeechRecognitionDispatcher::start(
    const WebSpeechRecognitionHandle& handle,
    const WebSpeechRecognitionParams& params,
    WebSpeechRecognizerClient* recognizer_client) {
  DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client);
  recognizer_client_ = recognizer_client;

  SpeechRecognitionHostMsg_StartRequest_Params msg_params;
  for (size_t i = 0; i < params.grammars().size(); ++i) {
    const WebSpeechGrammar& grammar = params.grammars()[i];
    msg_params.grammars.push_back(
        SpeechRecognitionGrammar(grammar.src().spec(), grammar.weight()));
  }
  msg_params.language = UTF16ToUTF8(params.language());
  msg_params.max_hypotheses = static_cast<uint32>(params.maxAlternatives());
  msg_params.continuous = params.continuous();
  msg_params.interim_results = params.interimResults();
  msg_params.origin_url = params.origin().toString().utf8();
  msg_params.render_view_id = routing_id();
  msg_params.request_id = GetOrCreateIDForHandle(handle);
  // The handle mapping will be removed in |OnRecognitionEnded|.
  Send(new SpeechRecognitionHostMsg_StartRequest(msg_params));
}

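// Requests that audio capture stop for |handle|; ignored if the session was
// never started or belongs to a different client.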
void SpeechRecognitionDispatcher::stop(
    const WebSpeechRecognitionHandle& handle,
    WebSpeechRecognizerClient* recognizer_client) {
  // Ignore a |stop| issued without a matching |start|.
  if (recognizer_client_ != recognizer_client || !HandleExists(handle))
    return;
  Send(new SpeechRecognitionHostMsg_StopCaptureRequest(
      routing_id(), GetOrCreateIDForHandle(handle)));
}

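// Aborts the session associated with |handle|, subject to the same guard as
// stop().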
void SpeechRecognitionDispatcher::abort(
    const WebSpeechRecognitionHandle& handle,
    WebSpeechRecognizerClient* recognizer_client) {
  // Ignore an |abort| issued without a matching |start|.
  if (recognizer_client_ != recognizer_client || !HandleExists(handle))
    return;
  Send(new SpeechRecognitionHostMsg_AbortRequest(
      routing_id(), GetOrCreateIDForHandle(handle)));
}

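// The following handlers relay one-shot lifecycle notifications from the
// browser process to the WebKit client for the session identified by
// |request_id|.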
void SpeechRecognitionDispatcher::OnRecognitionStarted(int request_id) {
  recognizer_client_->didStart(GetHandleFromID(request_id));
}

void SpeechRecognitionDispatcher::OnAudioStarted(int request_id) {
  recognizer_client_->didStartAudio(GetHandleFromID(request_id));
}

void SpeechRecognitionDispatcher::OnSoundStarted(int request_id) {
  recognizer_client_->didStartSound(GetHandleFromID(request_id));
}

void SpeechRecognitionDispatcher::OnSoundEnded(int request_id) {
  recognizer_client_->didEndSound(GetHandleFromID(request_id));
}

void SpeechRecognitionDispatcher::OnAudioEnded(int request_id) {
  recognizer_client_->didEndAudio(GetHandleFromID(request_id));
}

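// Translates the content-layer error enumeration into the corresponding
// WebKit error code. ERROR_NONE and ERROR_NO_MATCH are never expected here
// (the latter is reported via didReceiveNoMatch instead), hence the
// NOTREACHED()s.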
static WebSpeechRecognizerClient::ErrorCode WebKitErrorCode(
    SpeechRecognitionErrorCode e) {
  switch (e) {
    case SPEECH_RECOGNITION_ERROR_NONE:
      NOTREACHED();
      return WebSpeechRecognizerClient::OtherError;
    case SPEECH_RECOGNITION_ERROR_ABORTED:
      return WebSpeechRecognizerClient::AbortedError;
    case SPEECH_RECOGNITION_ERROR_AUDIO:
      return WebSpeechRecognizerClient::AudioCaptureError;
    case SPEECH_RECOGNITION_ERROR_NETWORK:
      return WebSpeechRecognizerClient::NetworkError;
    case SPEECH_RECOGNITION_ERROR_NOT_ALLOWED:
      return WebSpeechRecognizerClient::NotAllowedError;
    case SPEECH_RECOGNITION_ERROR_NO_SPEECH:
      return WebSpeechRecognizerClient::NoSpeechError;
    case SPEECH_RECOGNITION_ERROR_NO_MATCH:
      NOTREACHED();
      return WebSpeechRecognizerClient::OtherError;
    case SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR:
      return WebSpeechRecognizerClient::BadGrammarError;
  }
  NOTREACHED();
  return WebSpeechRecognizerClient::OtherError;
}

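// A no-match outcome is surfaced to WebKit as didReceiveNoMatch (with an
// empty result); every other error is mapped through WebKitErrorCode() and
// reported via didReceiveError.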
void SpeechRecognitionDispatcher::OnErrorOccurred(
    int request_id, const SpeechRecognitionError& error) {
  if (error.code == SPEECH_RECOGNITION_ERROR_NO_MATCH) {
    recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id),
                                          WebSpeechRecognitionResult());
  } else {
    recognizer_client_->didReceiveError(GetHandleFromID(request_id),
                                        WebString(),  // TODO(primiano): message?
                                        WebKitErrorCode(error.code));
  }
}

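// Tears down the id <-> handle mapping for a finished session before telling
// WebKit, so that a synchronous restart from didEnd() gets a fresh mapping.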
void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) {
  // TODO(tommi): It is possible that the handle isn't found in the array if
  // the user just refreshed the page. It seems that we then get a notification
  // for the previously loaded instance of the page.
  HandleMap::iterator iter = handle_map_.find(request_id);
  if (iter == handle_map_.end()) {
    DLOG(ERROR) << "OnRecognitionEnded called for a handle that doesn't exist";
  } else {
    WebSpeechRecognitionHandle handle = iter->second;
    // Note: we need to erase the handle from the map *before* calling didEnd.
    // didEnd may call back synchronously to start a new recognition session,
    // and we don't want to delete the handle from the map after that happens.
    handle_map_.erase(request_id);
    recognizer_client_->didEnd(handle);
  }
}

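// Splits the incoming results into provisional (interim) and final sets,
// converts each hypothesis into parallel transcript/confidence vectors, and
// hands both sets to the WebKit client in a single didReceiveResults call.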
void SpeechRecognitionDispatcher::OnResultsRetrieved(
    int request_id, const SpeechRecognitionResults& results) {
  size_t provisional_count = 0;
  SpeechRecognitionResults::const_iterator it = results.begin();
  for (; it != results.end(); ++it) {
    if (it->is_provisional)
      ++provisional_count;
  }

  WebVector<WebSpeechRecognitionResult> provisional(provisional_count);
  WebVector<WebSpeechRecognitionResult> final(
      results.size() - provisional_count);

  int provisional_index = 0, final_index = 0;
  for (it = results.begin(); it != results.end(); ++it) {
    const SpeechRecognitionResult& result = (*it);
    WebSpeechRecognitionResult* webkit_result = result.is_provisional ?
        &provisional[provisional_index++] : &final[final_index++];

    const size_t num_hypotheses = result.hypotheses.size();
    WebVector<WebString> transcripts(num_hypotheses);
    WebVector<float> confidences(num_hypotheses);
    for (size_t i = 0; i < num_hypotheses; ++i) {
      transcripts[i] = result.hypotheses[i].utterance;
      confidences[i] = static_cast<float>(result.hypotheses[i].confidence);
    }
    webkit_result->assign(transcripts, confidences, !result.is_provisional);
  }

  recognizer_client_->didReceiveResults(
      GetHandleFromID(request_id), final, provisional);
}

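// Handles are mapped to integer request ids so they can be referenced in IPC
// messages; an existing mapping is reused if present, otherwise a new id is
// assigned from |next_id_|.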
int SpeechRecognitionDispatcher::GetOrCreateIDForHandle(
    const WebSpeechRecognitionHandle& handle) {
  // Search first for an existing mapping.
  for (HandleMap::iterator iter = handle_map_.begin();
      iter != handle_map_.end();
      ++iter) {
    if (iter->second.equals(handle))
      return iter->first;
  }
  // If no existing mapping found, create a new one.
  const int new_id = next_id_;
  handle_map_[new_id] = handle;
  ++next_id_;
  return new_id;
}

bool SpeechRecognitionDispatcher::HandleExists(
    const WebSpeechRecognitionHandle& handle) {
  for (HandleMap::iterator iter = handle_map_.begin();
      iter != handle_map_.end();
      ++iter) {
    if (iter->second.equals(handle))
      return true;
  }
  return false;
}

const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
    int request_id) {
  HandleMap::iterator iter = handle_map_.find(request_id);
  DCHECK(iter != handle_map_.end());
  return iter->second;
}

}  // namespace content