Set WebKeyboardEvent.domCode on Windows.
[chromium-blink-merge.git] / content / browser / speech / speech_recognition_manager_impl.cc
blobf02c29f1dd41ee642cbffd91f1206b4443ea708c
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/speech/speech_recognition_manager_impl.h"

#include <limits>

#include "base/bind.h"
#include "content/browser/browser_main_loop.h"
#include "content/browser/renderer_host/media/media_stream_manager.h"
#include "content/browser/renderer_host/media/media_stream_ui_proxy.h"
#include "content/browser/speech/google_one_shot_remote_engine.h"
#include "content/browser/speech/google_streaming_remote_engine.h"
#include "content/browser/speech/speech_recognition_engine.h"
#include "content/browser/speech/speech_recognizer_impl.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/content_browser_client.h"
#include "content/public/browser/resource_context.h"
#include "content/public/browser/speech_recognition_event_listener.h"
#include "content/public/browser/speech_recognition_manager_delegate.h"
#include "content/public/browser/speech_recognition_session_config.h"
#include "content/public/browser/speech_recognition_session_context.h"
#include "content/public/common/speech_recognition_error.h"
#include "content/public/common/speech_recognition_result.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
27 #if defined(OS_ANDROID)
28 #include "content/browser/speech/speech_recognizer_impl_android.h"
29 #endif
31 using base::Callback;
33 namespace content {
35 SpeechRecognitionManager* SpeechRecognitionManager::manager_for_tests_;
37 namespace {
39 SpeechRecognitionManagerImpl* g_speech_recognition_manager_impl;
41 void ShowAudioInputSettingsOnFileThread(media::AudioManager* audio_manager) {
42 DCHECK_CURRENTLY_ON(BrowserThread::FILE);
43 audio_manager->ShowAudioInputSettings();
46 } // namespace
48 SpeechRecognitionManager* SpeechRecognitionManager::GetInstance() {
49 if (manager_for_tests_)
50 return manager_for_tests_;
51 return SpeechRecognitionManagerImpl::GetInstance();
54 void SpeechRecognitionManager::SetManagerForTesting(
55 SpeechRecognitionManager* manager) {
56 manager_for_tests_ = manager;
59 SpeechRecognitionManagerImpl* SpeechRecognitionManagerImpl::GetInstance() {
60 return g_speech_recognition_manager_impl;
63 SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl(
64 media::AudioManager* audio_manager,
65 MediaStreamManager* media_stream_manager)
66 : audio_manager_(audio_manager),
67 media_stream_manager_(media_stream_manager),
68 primary_session_id_(kSessionIDInvalid),
69 last_session_id_(kSessionIDInvalid),
70 is_dispatching_event_(false),
71 delegate_(GetContentClient()->browser()->
72 CreateSpeechRecognitionManagerDelegate()),
73 weak_factory_(this) {
74 DCHECK(!g_speech_recognition_manager_impl);
75 g_speech_recognition_manager_impl = this;
78 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() {
79 DCHECK(g_speech_recognition_manager_impl);
80 g_speech_recognition_manager_impl = NULL;
82 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
83 ++it) {
84 // MediaStreamUIProxy must be deleted on the IO thread.
85 BrowserThread::DeleteSoon(BrowserThread::IO, FROM_HERE,
86 it->second->ui.release());
87 delete it->second;
89 sessions_.clear();
92 int SpeechRecognitionManagerImpl::CreateSession(
93 const SpeechRecognitionSessionConfig& config) {
94 DCHECK_CURRENTLY_ON(BrowserThread::IO);
96 const int session_id = GetNextSessionID();
97 DCHECK(!SessionExists(session_id));
98 // Set-up the new session.
99 Session* session = new Session();
100 sessions_[session_id] = session;
101 session->id = session_id;
102 session->config = config;
103 session->context = config.initial_context;
105 std::string hardware_info;
106 bool can_report_metrics = false;
107 if (delegate_)
108 delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info);
110 // The legacy api cannot use continuous mode.
111 DCHECK(!config.is_legacy_api || !config.continuous);
113 #if !defined(OS_ANDROID)
114 // A SpeechRecognitionEngine (and corresponding Config) is required only
115 // when using SpeechRecognizerImpl, which performs the audio capture and
116 // endpointing in the browser. This is not the case of Android where, not
117 // only the speech recognition, but also the audio capture and endpointing
118 // activities performed outside of the browser (delegated via JNI to the
119 // Android API implementation).
121 SpeechRecognitionEngineConfig remote_engine_config;
122 remote_engine_config.language = config.language;
123 remote_engine_config.grammars = config.grammars;
124 remote_engine_config.audio_sample_rate =
125 SpeechRecognizerImpl::kAudioSampleRate;
126 remote_engine_config.audio_num_bits_per_sample =
127 SpeechRecognizerImpl::kNumBitsPerAudioSample;
128 remote_engine_config.filter_profanities = config.filter_profanities;
129 remote_engine_config.continuous = config.continuous;
130 remote_engine_config.interim_results = config.interim_results;
131 remote_engine_config.max_hypotheses = config.max_hypotheses;
132 remote_engine_config.hardware_info = hardware_info;
133 remote_engine_config.origin_url =
134 can_report_metrics ? config.origin_url : std::string();
135 remote_engine_config.auth_token = config.auth_token;
136 remote_engine_config.auth_scope = config.auth_scope;
137 remote_engine_config.preamble = config.preamble;
139 SpeechRecognitionEngine* google_remote_engine;
140 if (config.is_legacy_api) {
141 google_remote_engine =
142 new GoogleOneShotRemoteEngine(config.url_request_context_getter.get());
143 } else {
144 google_remote_engine = new GoogleStreamingRemoteEngine(
145 config.url_request_context_getter.get());
148 google_remote_engine->SetConfig(remote_engine_config);
150 session->recognizer = new SpeechRecognizerImpl(
151 this,
152 session_id,
153 config.continuous,
154 config.interim_results,
155 google_remote_engine);
156 #else
157 session->recognizer = new SpeechRecognizerImplAndroid(this, session_id);
158 #endif
159 return session_id;
162 void SpeechRecognitionManagerImpl::StartSession(int session_id) {
163 DCHECK_CURRENTLY_ON(BrowserThread::IO);
164 if (!SessionExists(session_id))
165 return;
167 // If there is another active session, abort that.
168 if (primary_session_id_ != kSessionIDInvalid &&
169 primary_session_id_ != session_id) {
170 AbortSession(primary_session_id_);
173 primary_session_id_ = session_id;
175 if (delegate_) {
176 delegate_->CheckRecognitionIsAllowed(
177 session_id,
178 base::Bind(&SpeechRecognitionManagerImpl::RecognitionAllowedCallback,
179 weak_factory_.GetWeakPtr(),
180 session_id));
184 void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id,
185 bool ask_user,
186 bool is_allowed) {
187 DCHECK_CURRENTLY_ON(BrowserThread::IO);
188 if (!SessionExists(session_id))
189 return;
191 SessionsTable::iterator iter = sessions_.find(session_id);
192 DCHECK(iter != sessions_.end());
193 Session* session = iter->second;
195 if (session->abort_requested)
196 return;
198 if (ask_user) {
199 SpeechRecognitionSessionContext& context = session->context;
200 context.label = media_stream_manager_->MakeMediaAccessRequest(
201 context.render_process_id,
202 context.render_frame_id,
203 context.request_id,
204 StreamOptions(true, false),
205 GURL(context.context_name),
206 base::Bind(
207 &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback,
208 weak_factory_.GetWeakPtr(), session_id));
209 return;
212 if (is_allowed) {
213 base::MessageLoop::current()->PostTask(
214 FROM_HERE,
215 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
216 weak_factory_.GetWeakPtr(),
217 session_id,
218 EVENT_START));
219 } else {
220 OnRecognitionError(session_id, SpeechRecognitionError(
221 SPEECH_RECOGNITION_ERROR_NOT_ALLOWED));
222 base::MessageLoop::current()->PostTask(
223 FROM_HERE,
224 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
225 weak_factory_.GetWeakPtr(),
226 session_id,
227 EVENT_ABORT));
231 void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback(
232 int session_id,
233 const MediaStreamDevices& devices,
234 scoped_ptr<MediaStreamUIProxy> stream_ui) {
235 DCHECK_CURRENTLY_ON(BrowserThread::IO);
237 SessionsTable::iterator iter = sessions_.find(session_id);
238 if (iter == sessions_.end())
239 return;
241 bool is_allowed = !devices.empty();
242 if (is_allowed) {
243 // Copy the approved devices array to the context for UI indication.
244 iter->second->context.devices = devices;
246 // Save the UI object.
247 iter->second->ui = stream_ui.Pass();
250 // Clear the label to indicate the request has been done.
251 iter->second->context.label.clear();
253 // Notify the recognition about the request result.
254 RecognitionAllowedCallback(iter->first, false, is_allowed);
257 void SpeechRecognitionManagerImpl::AbortSession(int session_id) {
258 DCHECK_CURRENTLY_ON(BrowserThread::IO);
259 if (!SessionExists(session_id))
260 return;
262 SessionsTable::iterator iter = sessions_.find(session_id);
263 iter->second->ui.reset();
265 if (iter->second->abort_requested)
266 return;
268 iter->second->abort_requested = true;
270 base::MessageLoop::current()->PostTask(
271 FROM_HERE,
272 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
273 weak_factory_.GetWeakPtr(),
274 session_id,
275 EVENT_ABORT));
278 void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) {
279 DCHECK_CURRENTLY_ON(BrowserThread::IO);
280 if (!SessionExists(session_id))
281 return;
283 SessionsTable::iterator iter = sessions_.find(session_id);
284 iter->second->ui.reset();
286 base::MessageLoop::current()->PostTask(
287 FROM_HERE,
288 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
289 weak_factory_.GetWeakPtr(),
290 session_id,
291 EVENT_STOP_CAPTURE));
// Here begins the SpeechRecognitionEventListener interface implementation,
// which will simply relay the events to the proper listener registered for the
// particular session and to the catch-all listener provided by the delegate
// (if any).
299 void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) {
300 DCHECK_CURRENTLY_ON(BrowserThread::IO);
301 if (!SessionExists(session_id))
302 return;
304 SessionsTable::iterator iter = sessions_.find(session_id);
305 if (iter->second->ui) {
306 // Notify the UI that the devices are being used.
307 iter->second->ui->OnStarted(base::Closure(),
308 MediaStreamUIProxy::WindowIdCallback());
311 DCHECK_EQ(primary_session_id_, session_id);
312 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
313 delegate_listener->OnRecognitionStart(session_id);
314 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
315 listener->OnRecognitionStart(session_id);
318 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) {
319 DCHECK_CURRENTLY_ON(BrowserThread::IO);
320 if (!SessionExists(session_id))
321 return;
323 DCHECK_EQ(primary_session_id_, session_id);
324 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
325 delegate_listener->OnAudioStart(session_id);
326 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
327 listener->OnAudioStart(session_id);
330 void SpeechRecognitionManagerImpl::OnEnvironmentEstimationComplete(
331 int session_id) {
332 DCHECK_CURRENTLY_ON(BrowserThread::IO);
333 if (!SessionExists(session_id))
334 return;
336 DCHECK_EQ(primary_session_id_, session_id);
337 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
338 delegate_listener->OnEnvironmentEstimationComplete(session_id);
339 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
340 listener->OnEnvironmentEstimationComplete(session_id);
343 void SpeechRecognitionManagerImpl::OnSoundStart(int session_id) {
344 DCHECK_CURRENTLY_ON(BrowserThread::IO);
345 if (!SessionExists(session_id))
346 return;
348 DCHECK_EQ(primary_session_id_, session_id);
349 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
350 delegate_listener->OnSoundStart(session_id);
351 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
352 listener->OnSoundStart(session_id);
355 void SpeechRecognitionManagerImpl::OnSoundEnd(int session_id) {
356 DCHECK_CURRENTLY_ON(BrowserThread::IO);
357 if (!SessionExists(session_id))
358 return;
360 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
361 delegate_listener->OnSoundEnd(session_id);
362 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
363 listener->OnSoundEnd(session_id);
366 void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id) {
367 DCHECK_CURRENTLY_ON(BrowserThread::IO);
368 if (!SessionExists(session_id))
369 return;
371 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
372 delegate_listener->OnAudioEnd(session_id);
373 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
374 listener->OnAudioEnd(session_id);
375 base::MessageLoop::current()->PostTask(
376 FROM_HERE,
377 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
378 weak_factory_.GetWeakPtr(),
379 session_id,
380 EVENT_AUDIO_ENDED));
383 void SpeechRecognitionManagerImpl::OnRecognitionResults(
384 int session_id, const SpeechRecognitionResults& results) {
385 DCHECK_CURRENTLY_ON(BrowserThread::IO);
386 if (!SessionExists(session_id))
387 return;
389 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
390 delegate_listener->OnRecognitionResults(session_id, results);
391 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
392 listener->OnRecognitionResults(session_id, results);
395 void SpeechRecognitionManagerImpl::OnRecognitionError(
396 int session_id, const SpeechRecognitionError& error) {
397 DCHECK_CURRENTLY_ON(BrowserThread::IO);
398 if (!SessionExists(session_id))
399 return;
401 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
402 delegate_listener->OnRecognitionError(session_id, error);
403 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
404 listener->OnRecognitionError(session_id, error);
407 void SpeechRecognitionManagerImpl::OnAudioLevelsChange(
408 int session_id, float volume, float noise_volume) {
409 DCHECK_CURRENTLY_ON(BrowserThread::IO);
410 if (!SessionExists(session_id))
411 return;
413 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
414 delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume);
415 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
416 listener->OnAudioLevelsChange(session_id, volume, noise_volume);
419 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) {
420 DCHECK_CURRENTLY_ON(BrowserThread::IO);
421 if (!SessionExists(session_id))
422 return;
424 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
425 delegate_listener->OnRecognitionEnd(session_id);
426 if (SpeechRecognitionEventListener* listener = GetListener(session_id))
427 listener->OnRecognitionEnd(session_id);
428 base::MessageLoop::current()->PostTask(
429 FROM_HERE,
430 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
431 weak_factory_.GetWeakPtr(),
432 session_id,
433 EVENT_RECOGNITION_ENDED));
436 int SpeechRecognitionManagerImpl::GetSession(
437 int render_process_id, int render_view_id, int request_id) const {
438 DCHECK_CURRENTLY_ON(BrowserThread::IO);
439 SessionsTable::const_iterator iter;
440 for (iter = sessions_.begin(); iter != sessions_.end(); ++iter) {
441 const int session_id = iter->first;
442 const SpeechRecognitionSessionContext& context = iter->second->context;
443 if (context.render_process_id == render_process_id &&
444 context.render_view_id == render_view_id &&
445 context.request_id == request_id) {
446 return session_id;
449 return kSessionIDInvalid;
452 SpeechRecognitionSessionContext
453 SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const {
454 return GetSession(session_id)->context;
457 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderProcess(
458 int render_process_id) {
459 // This method gracefully destroys sessions for the listener. However, since
460 // the listener itself is likely to be destroyed after this call, we avoid
461 // dispatching further events to it, marking the |listener_is_active| flag.
462 DCHECK_CURRENTLY_ON(BrowserThread::IO);
463 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
464 ++it) {
465 Session* session = it->second;
466 if (session->context.render_process_id == render_process_id) {
467 AbortSession(session->id);
468 session->listener_is_active = false;
473 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView(
474 int render_process_id,
475 int render_view_id) {
476 DCHECK_CURRENTLY_ON(BrowserThread::IO);
477 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
478 ++it) {
479 Session* session = it->second;
480 if (session->context.render_process_id == render_process_id &&
481 session->context.render_view_id == render_view_id) {
482 AbortSession(session->id);
487 // ----------------------- Core FSM implementation ---------------------------
488 void SpeechRecognitionManagerImpl::DispatchEvent(int session_id,
489 FSMEvent event) {
490 DCHECK_CURRENTLY_ON(BrowserThread::IO);
492 // There are some corner cases in which the session might be deleted (due to
493 // an EndRecognition event) between a request (e.g. Abort) and its dispatch.
494 if (!SessionExists(session_id))
495 return;
497 Session* session = GetSession(session_id);
498 FSMState session_state = GetSessionState(session_id);
499 DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE);
500 DCHECK_LE(event, EVENT_MAX_VALUE);
502 // Event dispatching must be sequential, otherwise it will break all the rules
503 // and the assumptions of the finite state automata model.
504 DCHECK(!is_dispatching_event_);
505 is_dispatching_event_ = true;
506 ExecuteTransitionAndGetNextState(session, session_state, event);
507 is_dispatching_event_ = false;
510 // This FSM handles the evolution of each session, from the viewpoint of the
511 // interaction with the user (that may be either the browser end-user which
512 // interacts with UI bubbles, or JS developer intracting with JS methods).
513 // All the events received by the SpeechRecognizer instances (one for each
514 // session) are always routed to the SpeechRecognitionEventListener(s)
515 // regardless the choices taken in this FSM.
516 void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState(
517 Session* session, FSMState session_state, FSMEvent event) {
518 // Note: since we're not tracking the state of the recognizer object, rather
519 // we're directly retrieving it (through GetSessionState), we see its events
520 // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution
521 // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just
522 // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus
523 // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT).
524 // This makes the code below a bit tricky but avoids a lot of code for
525 // tracking and reconstructing asynchronously the state of the recognizer.
526 switch (session_state) {
527 case SESSION_STATE_IDLE:
528 switch (event) {
529 case EVENT_START:
530 return SessionStart(*session);
531 case EVENT_ABORT:
532 return SessionAbort(*session);
533 case EVENT_RECOGNITION_ENDED:
534 return SessionDelete(session);
535 case EVENT_STOP_CAPTURE:
536 return SessionStopAudioCapture(*session);
537 case EVENT_AUDIO_ENDED:
538 return;
540 break;
541 case SESSION_STATE_CAPTURING_AUDIO:
542 switch (event) {
543 case EVENT_STOP_CAPTURE:
544 return SessionStopAudioCapture(*session);
545 case EVENT_ABORT:
546 return SessionAbort(*session);
547 case EVENT_START:
548 return;
549 case EVENT_AUDIO_ENDED:
550 case EVENT_RECOGNITION_ENDED:
551 return NotFeasible(*session, event);
553 break;
554 case SESSION_STATE_WAITING_FOR_RESULT:
555 switch (event) {
556 case EVENT_ABORT:
557 return SessionAbort(*session);
558 case EVENT_AUDIO_ENDED:
559 return ResetCapturingSessionId(*session);
560 case EVENT_START:
561 case EVENT_STOP_CAPTURE:
562 return;
563 case EVENT_RECOGNITION_ENDED:
564 return NotFeasible(*session, event);
566 break;
568 return NotFeasible(*session, event);
571 SpeechRecognitionManagerImpl::FSMState
572 SpeechRecognitionManagerImpl::GetSessionState(int session_id) const {
573 Session* session = GetSession(session_id);
574 if (!session->recognizer.get() || !session->recognizer->IsActive())
575 return SESSION_STATE_IDLE;
576 if (session->recognizer->IsCapturingAudio())
577 return SESSION_STATE_CAPTURING_AUDIO;
578 return SESSION_STATE_WAITING_FOR_RESULT;
581 // ----------- Contract for all the FSM evolution functions below -------------
582 // - Are guaranteed to be executed in the IO thread;
583 // - Are guaranteed to be not reentrant (themselves and each other);
585 void SpeechRecognitionManagerImpl::SessionStart(const Session& session) {
586 DCHECK_EQ(primary_session_id_, session.id);
587 const MediaStreamDevices& devices = session.context.devices;
588 std::string device_id;
589 if (devices.empty()) {
590 // From the ask_user=false path, use the default device.
591 // TODO(xians): Abort the session after we do not need to support this path
592 // anymore.
593 device_id = media::AudioManagerBase::kDefaultDeviceId;
594 } else {
595 // From the ask_user=true path, use the selected device.
596 DCHECK_EQ(1u, devices.size());
597 DCHECK_EQ(MEDIA_DEVICE_AUDIO_CAPTURE, devices.front().type);
598 device_id = devices.front().id;
601 session.recognizer->StartRecognition(device_id);
604 void SpeechRecognitionManagerImpl::SessionAbort(const Session& session) {
605 if (primary_session_id_ == session.id)
606 primary_session_id_ = kSessionIDInvalid;
607 DCHECK(session.recognizer.get());
608 session.recognizer->AbortRecognition();
611 void SpeechRecognitionManagerImpl::SessionStopAudioCapture(
612 const Session& session) {
613 DCHECK(session.recognizer.get());
614 session.recognizer->StopAudioCapture();
617 void SpeechRecognitionManagerImpl::ResetCapturingSessionId(
618 const Session& session) {
619 DCHECK_EQ(primary_session_id_, session.id);
620 primary_session_id_ = kSessionIDInvalid;
623 void SpeechRecognitionManagerImpl::SessionDelete(Session* session) {
624 DCHECK(session->recognizer.get() == NULL || !session->recognizer->IsActive());
625 if (primary_session_id_ == session->id)
626 primary_session_id_ = kSessionIDInvalid;
627 if (!session->context.label.empty())
628 media_stream_manager_->CancelRequest(session->context.label);
629 sessions_.erase(session->id);
630 delete session;
633 void SpeechRecognitionManagerImpl::NotFeasible(const Session& session,
634 FSMEvent event) {
635 NOTREACHED() << "Unfeasible event " << event
636 << " in state " << GetSessionState(session.id)
637 << " for session " << session.id;
640 int SpeechRecognitionManagerImpl::GetNextSessionID() {
641 ++last_session_id_;
642 // Deal with wrapping of last_session_id_. (How civilized).
643 if (last_session_id_ <= 0)
644 last_session_id_ = 1;
645 return last_session_id_;
648 bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const {
649 return sessions_.find(session_id) != sessions_.end();
652 SpeechRecognitionManagerImpl::Session*
653 SpeechRecognitionManagerImpl::GetSession(int session_id) const {
654 DCHECK_CURRENTLY_ON(BrowserThread::IO);
655 SessionsTable::const_iterator iter = sessions_.find(session_id);
656 DCHECK(iter != sessions_.end());
657 return iter->second;
660 SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener(
661 int session_id) const {
662 Session* session = GetSession(session_id);
663 if (session->listener_is_active && session->config.event_listener)
664 return session->config.event_listener.get();
665 return NULL;
668 SpeechRecognitionEventListener*
669 SpeechRecognitionManagerImpl::GetDelegateListener() const {
670 return delegate_.get() ? delegate_->GetEventListener() : NULL;
673 const SpeechRecognitionSessionConfig&
674 SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const {
675 return GetSession(session_id)->config;
678 bool SpeechRecognitionManagerImpl::HasAudioInputDevices() {
679 return audio_manager_->HasAudioInputDevices();
682 base::string16 SpeechRecognitionManagerImpl::GetAudioInputDeviceModel() {
683 return audio_manager_->GetAudioInputDeviceModel();
686 void SpeechRecognitionManagerImpl::ShowAudioInputSettings() {
687 // Since AudioManager::ShowAudioInputSettings can potentially launch external
688 // processes, do that in the FILE thread to not block the calling threads.
689 BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE,
690 base::Bind(&ShowAudioInputSettingsOnFileThread,
691 audio_manager_));
694 SpeechRecognitionManagerImpl::Session::Session()
695 : id(kSessionIDInvalid),
696 abort_requested(false),
697 listener_is_active(true) {
700 SpeechRecognitionManagerImpl::Session::~Session() {
703 } // namespace content