// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/speech/speech_recognition_manager_impl.h"

#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "content/browser/browser_main_loop.h"
#include "content/browser/renderer_host/media/media_stream_manager.h"
#include "content/browser/renderer_host/media/media_stream_ui_proxy.h"
#include "content/browser/speech/google_one_shot_remote_engine.h"
#include "content/browser/speech/google_streaming_remote_engine.h"
#include "content/browser/speech/speech_recognition_engine.h"
#include "content/browser/speech/speech_recognizer_impl.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/content_browser_client.h"
#include "content/public/browser/resource_context.h"
#include "content/public/browser/speech_recognition_event_listener.h"
#include "content/public/browser/speech_recognition_manager_delegate.h"
#include "content/public/browser/speech_recognition_session_config.h"
#include "content/public/browser/speech_recognition_session_context.h"
#include "content/public/common/speech_recognition_error.h"
#include "content/public/common/speech_recognition_result.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"

#if defined(OS_ANDROID)
#include "content/browser/speech/speech_recognizer_impl_android.h"
#endif

namespace content {

SpeechRecognitionManager* SpeechRecognitionManager::manager_for_tests_;

namespace {

SpeechRecognitionManagerImpl* g_speech_recognition_manager_impl;

void ShowAudioInputSettingsOnFileThread(media::AudioManager* audio_manager) {
  DCHECK_CURRENTLY_ON(BrowserThread::FILE);
  audio_manager->ShowAudioInputSettings();
}

}  // namespace

SpeechRecognitionManager* SpeechRecognitionManager::GetInstance() {
  if (manager_for_tests_)
    return manager_for_tests_;
  return SpeechRecognitionManagerImpl::GetInstance();
}

void SpeechRecognitionManager::SetManagerForTesting(
    SpeechRecognitionManager* manager) {
  manager_for_tests_ = manager;
}

SpeechRecognitionManagerImpl* SpeechRecognitionManagerImpl::GetInstance() {
  return g_speech_recognition_manager_impl;
}

SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl(
    media::AudioManager* audio_manager,
    MediaStreamManager* media_stream_manager)
    : audio_manager_(audio_manager),
      media_stream_manager_(media_stream_manager),
      primary_session_id_(kSessionIDInvalid),
      last_session_id_(kSessionIDInvalid),
      is_dispatching_event_(false),
      delegate_(GetContentClient()->browser()->
                    CreateSpeechRecognitionManagerDelegate()),
      weak_factory_(this) {
  DCHECK(!g_speech_recognition_manager_impl);
  g_speech_recognition_manager_impl = this;
}

SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() {
  DCHECK(g_speech_recognition_manager_impl);
  g_speech_recognition_manager_impl = NULL;

  for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
       ++it) {
    // MediaStreamUIProxy must be deleted on the IO thread.
    BrowserThread::DeleteSoon(BrowserThread::IO, FROM_HERE,
                              it->second->ui.release());
    delete it->second;
  }
  sessions_.clear();
}

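// Creates a new session in the IDLE state without starting it: allocates the
// Session entry, copies the config and its initial context and (except on
// Android) configures the Google remote recognition engine backing the
// recognizer. Returns the id assigned to the new session.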
int SpeechRecognitionManagerImpl::CreateSession(
    const SpeechRecognitionSessionConfig& config) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);

  const int session_id = GetNextSessionID();
  DCHECK(!SessionExists(session_id));

  // Set up the new session.
  Session* session = new Session();
  sessions_[session_id] = session;
  session->id = session_id;
  session->config = config;
  session->context = config.initial_context;

  std::string hardware_info;
  bool can_report_metrics = false;
  if (delegate_)
    delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info);

  // The legacy API cannot use continuous mode.
  DCHECK(!config.is_legacy_api || !config.continuous);

#if !defined(OS_ANDROID)
  // A SpeechRecognitionEngine (and corresponding Config) is required only
  // when using SpeechRecognizerImpl, which performs the audio capture and
  // endpointing in the browser. This is not the case on Android, where not
  // only the speech recognition but also the audio capture and endpointing
  // activities are performed outside of the browser (delegated via JNI to the
  // Android API implementation).

  SpeechRecognitionEngineConfig remote_engine_config;
  remote_engine_config.language = config.language;
  remote_engine_config.grammars = config.grammars;
  remote_engine_config.audio_sample_rate =
      SpeechRecognizerImpl::kAudioSampleRate;
  remote_engine_config.audio_num_bits_per_sample =
      SpeechRecognizerImpl::kNumBitsPerAudioSample;
  remote_engine_config.filter_profanities = config.filter_profanities;
  remote_engine_config.continuous = config.continuous;
  remote_engine_config.interim_results = config.interim_results;
  remote_engine_config.max_hypotheses = config.max_hypotheses;
  remote_engine_config.hardware_info = hardware_info;
  remote_engine_config.origin_url =
      can_report_metrics ? config.origin_url : std::string();
  remote_engine_config.auth_token = config.auth_token;
  remote_engine_config.auth_scope = config.auth_scope;
  remote_engine_config.preamble = config.preamble;

  SpeechRecognitionEngine* google_remote_engine;
  if (config.is_legacy_api) {
    google_remote_engine =
        new GoogleOneShotRemoteEngine(config.url_request_context_getter.get());
  } else {
    google_remote_engine = new GoogleStreamingRemoteEngine(
        config.url_request_context_getter.get());
  }

  google_remote_engine->SetConfig(remote_engine_config);

  session->recognizer = new SpeechRecognizerImpl(
      this,
      session_id,
      config.continuous,
      config.interim_results,
      google_remote_engine);
#else
  session->recognizer = new SpeechRecognizerImplAndroid(this, session_id);
#endif

  return session_id;
}

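// A minimal usage sketch (not part of this file's logic), combining
// CreateSession() above with StartSession() below. The listener and the
// language value are hypothetical; real callers on the IO thread supply their
// own SpeechRecognitionEventListener through the config:
//
//   SpeechRecognitionSessionConfig config;
//   config.language = "en-US";                     // hypothetical value
//   config.event_listener = my_listener_weak_ptr;  // hypothetical listener
//   int session_id =
//       SpeechRecognitionManager::GetInstance()->CreateSession(config);
//   SpeechRecognitionManager::GetInstance()->StartSession(session_id);
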
void SpeechRecognitionManagerImpl::StartSession(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  // If there is another active session, abort it first.
  if (primary_session_id_ != kSessionIDInvalid &&
      primary_session_id_ != session_id) {
    AbortSession(primary_session_id_);
  }

  primary_session_id_ = session_id;

  if (delegate_) {
    delegate_->CheckRecognitionIsAllowed(
        session_id,
        base::Bind(&SpeechRecognitionManagerImpl::RecognitionAllowedCallback,
                   weak_factory_.GetWeakPtr(),
                   session_id));
  }
}

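// Invoked with the outcome of the delegate's permission check (and, later,
// with the outcome of the media access request issued below): when |ask_user|
// is true a microphone permission request is sent through MediaStreamManager;
// otherwise the session is either started (EVENT_START) or aborted with a
// NOT_ALLOWED error, depending on |is_allowed|.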
void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id,
                                                              bool ask_user,
                                                              bool is_allowed) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  SessionsTable::iterator iter = sessions_.find(session_id);
  DCHECK(iter != sessions_.end());
  Session* session = iter->second;

  if (session->abort_requested)
    return;

  if (ask_user) {
    SpeechRecognitionSessionContext& context = session->context;
    context.label = media_stream_manager_->MakeMediaAccessRequest(
        context.render_process_id,
        context.render_frame_id,
        context.request_id,
        StreamOptions(true, false),
        GURL(context.context_name),
        base::Bind(
            &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback,
            weak_factory_.GetWeakPtr(), session_id));
    return;
  }

  if (is_allowed) {
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE,
        base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                   weak_factory_.GetWeakPtr(), session_id, EVENT_START));
  } else {
    OnRecognitionError(session_id, SpeechRecognitionError(
        SPEECH_RECOGNITION_ERROR_NOT_ALLOWED));
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE,
        base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                   weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT));
  }
}

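// Called back by MediaStreamManager once the user has answered (or dismissed)
// the microphone permission request. An empty device list means the request
// was denied; the result is forwarded to RecognitionAllowedCallback with
// ask_user = false.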
void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback(
    int session_id,
    const MediaStreamDevices& devices,
    scoped_ptr<MediaStreamUIProxy> stream_ui) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);

  SessionsTable::iterator iter = sessions_.find(session_id);
  if (iter == sessions_.end())
    return;

  bool is_allowed = !devices.empty();
  if (is_allowed) {
    // Copy the approved devices array to the context for UI indication.
    iter->second->context.devices = devices;

    // Save the UI object.
    iter->second->ui = stream_ui.Pass();
  }

  // Clear the label to indicate the request has been done.
  iter->second->context.label.clear();

  // Notify the recognition about the request result.
  RecognitionAllowedCallback(iter->first, false, is_allowed);
}

void SpeechRecognitionManagerImpl::AbortSession(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  SessionsTable::iterator iter = sessions_.find(session_id);
  iter->second->ui.reset();

  if (iter->second->abort_requested)
    return;

  iter->second->abort_requested = true;

  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT));
}

void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  SessionsTable::iterator iter = sessions_.find(session_id);
  iter->second->ui.reset();

  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                 weak_factory_.GetWeakPtr(), session_id, EVENT_STOP_CAPTURE));
}

// Here begins the SpeechRecognitionEventListener interface implementation,
// which simply relays the events to the proper listener registered for the
// particular session and to the catch-all listener provided by the delegate
// (if any).

void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  SessionsTable::iterator iter = sessions_.find(session_id);
  if (iter->second->ui) {
    // Notify the UI that the devices are being used.
    iter->second->ui->OnStarted(base::Closure(),
                                MediaStreamUIProxy::WindowIdCallback());
  }

  DCHECK_EQ(primary_session_id_, session_id);
  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnRecognitionStart(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnRecognitionStart(session_id);
}

void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  DCHECK_EQ(primary_session_id_, session_id);
  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnAudioStart(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnAudioStart(session_id);
}

void SpeechRecognitionManagerImpl::OnEnvironmentEstimationComplete(
    int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  DCHECK_EQ(primary_session_id_, session_id);
  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnEnvironmentEstimationComplete(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnEnvironmentEstimationComplete(session_id);
}

void SpeechRecognitionManagerImpl::OnSoundStart(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  DCHECK_EQ(primary_session_id_, session_id);
  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnSoundStart(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnSoundStart(session_id);
}

void SpeechRecognitionManagerImpl::OnSoundEnd(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnSoundEnd(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnSoundEnd(session_id);
}

void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnAudioEnd(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnAudioEnd(session_id);
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                 weak_factory_.GetWeakPtr(), session_id, EVENT_AUDIO_ENDED));
}

void SpeechRecognitionManagerImpl::OnRecognitionResults(
    int session_id, const SpeechRecognitionResults& results) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnRecognitionResults(session_id, results);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnRecognitionResults(session_id, results);
}

void SpeechRecognitionManagerImpl::OnRecognitionError(
    int session_id, const SpeechRecognitionError& error) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnRecognitionError(session_id, error);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnRecognitionError(session_id, error);
}

void SpeechRecognitionManagerImpl::OnAudioLevelsChange(
    int session_id, float volume, float noise_volume) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnAudioLevelsChange(session_id, volume, noise_volume);
}

void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnRecognitionEnd(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnRecognitionEnd(session_id);
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                            weak_factory_.GetWeakPtr(), session_id,
                            EVENT_RECOGNITION_ENDED));
}

int SpeechRecognitionManagerImpl::GetSession(
    int render_process_id, int render_view_id, int request_id) const {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  SessionsTable::const_iterator iter;
  for (iter = sessions_.begin(); iter != sessions_.end(); ++iter) {
    const int session_id = iter->first;
    const SpeechRecognitionSessionContext& context = iter->second->context;
    if (context.render_process_id == render_process_id &&
        context.render_view_id == render_view_id &&
        context.request_id == request_id) {
      return session_id;
    }
  }
  return kSessionIDInvalid;
}

SpeechRecognitionSessionContext
SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const {
  return GetSession(session_id)->context;
}

void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderProcess(
    int render_process_id) {
  // This method gracefully destroys sessions for the listener. However, since
  // the listener itself is likely to be destroyed after this call, we avoid
  // dispatching further events to it by clearing the |listener_is_active|
  // flag.
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
       ++it) {
    Session* session = it->second;
    if (session->context.render_process_id == render_process_id) {
      AbortSession(session->id);
      session->listener_is_active = false;
    }
  }
}

void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView(
    int render_process_id,
    int render_view_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
       ++it) {
    Session* session = it->second;
    if (session->context.render_process_id == render_process_id &&
        session->context.render_view_id == render_view_id) {
      AbortSession(session->id);
    }
  }
}

// ----------------------- Core FSM implementation ---------------------------
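// Single entry point of the FSM: every event is posted (never dispatched
// synchronously) to this method on the IO thread, which looks up the current
// state of the session's recognizer and executes the corresponding transition.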
void SpeechRecognitionManagerImpl::DispatchEvent(int session_id,
                                                 FSMEvent event) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);

  // There are some corner cases in which the session might be deleted (due to
  // an EndRecognition event) between a request (e.g. Abort) and its dispatch.
  if (!SessionExists(session_id))
    return;

  Session* session = GetSession(session_id);
  FSMState session_state = GetSessionState(session_id);
  DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE);
  DCHECK_LE(event, EVENT_MAX_VALUE);

  // Event dispatching must be sequential, otherwise it would break all the
  // rules and assumptions of the finite state automaton model.
  DCHECK(!is_dispatching_event_);
  is_dispatching_event_ = true;
  ExecuteTransitionAndGetNextState(session, session_state, event);
  is_dispatching_event_ = false;
}

// This FSM handles the evolution of each session, from the viewpoint of the
// interaction with the user (who may be either the browser end-user
// interacting with UI bubbles, or the JS developer interacting with JS
// methods). All the events received by the SpeechRecognizer instances (one
// for each session) are always routed to the SpeechRecognitionEventListener(s)
// regardless of the choices taken in this FSM.
void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState(
    Session* session, FSMState session_state, FSMEvent event) {
  // Note: since we are not tracking the state of the recognizer object, but
  // rather retrieving it directly (through GetSessionState), we see its events
  // (namely AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution
  // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just
  // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus
  // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT).
  // This makes the code below a bit tricky but avoids a lot of code for
  // tracking and reconstructing the recognizer's state asynchronously.
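  //
  // Summary of the transitions implemented by the nested switches below
  // (events not listed for a state are ignored):
  //
  //   IDLE:               START -> SessionStart, ABORT -> SessionAbort,
  //                       STOP_CAPTURE -> SessionStopAudioCapture,
  //                       RECOGNITION_ENDED -> SessionDelete.
  //   CAPTURING_AUDIO:    STOP_CAPTURE -> SessionStopAudioCapture,
  //                       ABORT -> SessionAbort,
  //                       AUDIO_ENDED / RECOGNITION_ENDED -> NotFeasible.
  //   WAITING_FOR_RESULT: ABORT -> SessionAbort,
  //                       AUDIO_ENDED -> ResetCapturingSessionId,
  //                       RECOGNITION_ENDED -> NotFeasible.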
  switch (session_state) {
    case SESSION_STATE_IDLE:
      switch (event) {
        case EVENT_START:
          return SessionStart(*session);
        case EVENT_ABORT:
          return SessionAbort(*session);
        case EVENT_RECOGNITION_ENDED:
          return SessionDelete(session);
        case EVENT_STOP_CAPTURE:
          return SessionStopAudioCapture(*session);
        case EVENT_AUDIO_ENDED:
          return;
      }
      break;
    case SESSION_STATE_CAPTURING_AUDIO:
      switch (event) {
        case EVENT_STOP_CAPTURE:
          return SessionStopAudioCapture(*session);
        case EVENT_ABORT:
          return SessionAbort(*session);
        case EVENT_START:
          return;
        case EVENT_AUDIO_ENDED:
        case EVENT_RECOGNITION_ENDED:
          return NotFeasible(*session, event);
      }
      break;
    case SESSION_STATE_WAITING_FOR_RESULT:
      switch (event) {
        case EVENT_ABORT:
          return SessionAbort(*session);
        case EVENT_AUDIO_ENDED:
          return ResetCapturingSessionId(*session);
        case EVENT_START:
        case EVENT_STOP_CAPTURE:
          return;
        case EVENT_RECOGNITION_ENDED:
          return NotFeasible(*session, event);
      }
      break;
  }
  return NotFeasible(*session, event);
}

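// The FSM state of a session is not stored anywhere: it is derived on demand
// from the state of its recognizer (no recognizer or inactive recognizer ->
// IDLE, capturing audio -> CAPTURING_AUDIO, otherwise -> WAITING_FOR_RESULT).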
SpeechRecognitionManagerImpl::FSMState
SpeechRecognitionManagerImpl::GetSessionState(int session_id) const {
  Session* session = GetSession(session_id);
  if (!session->recognizer.get() || !session->recognizer->IsActive())
    return SESSION_STATE_IDLE;
  if (session->recognizer->IsCapturingAudio())
    return SESSION_STATE_CAPTURING_AUDIO;
  return SESSION_STATE_WAITING_FOR_RESULT;
}

// ----------- Contract for all the FSM evolution functions below -------------
//  - They are guaranteed to be executed on the IO thread;
//  - They are guaranteed not to be reentrant (with themselves or each other).

void SpeechRecognitionManagerImpl::SessionStart(const Session& session) {
  DCHECK_EQ(primary_session_id_, session.id);
  const MediaStreamDevices& devices = session.context.devices;
  std::string device_id;
  if (devices.empty()) {
    // From the ask_user=false path, use the default device.
    // TODO(xians): Abort the session once this path no longer needs to be
    // supported.
    device_id = media::AudioManagerBase::kDefaultDeviceId;
  } else {
    // From the ask_user=true path, use the selected device.
    DCHECK_EQ(1u, devices.size());
    DCHECK_EQ(MEDIA_DEVICE_AUDIO_CAPTURE, devices.front().type);
    device_id = devices.front().id;
  }

  session.recognizer->StartRecognition(device_id);
}

void SpeechRecognitionManagerImpl::SessionAbort(const Session& session) {
  if (primary_session_id_ == session.id)
    primary_session_id_ = kSessionIDInvalid;
  DCHECK(session.recognizer.get());
  session.recognizer->AbortRecognition();
}

void SpeechRecognitionManagerImpl::SessionStopAudioCapture(
    const Session& session) {
  DCHECK(session.recognizer.get());
  session.recognizer->StopAudioCapture();
}

void SpeechRecognitionManagerImpl::ResetCapturingSessionId(
    const Session& session) {
  DCHECK_EQ(primary_session_id_, session.id);
  primary_session_id_ = kSessionIDInvalid;
}

void SpeechRecognitionManagerImpl::SessionDelete(Session* session) {
  DCHECK(session->recognizer.get() == NULL ||
         !session->recognizer->IsActive());
  if (primary_session_id_ == session->id)
    primary_session_id_ = kSessionIDInvalid;
  if (!session->context.label.empty())
    media_stream_manager_->CancelRequest(session->context.label);
  sessions_.erase(session->id);
  delete session;
}

void SpeechRecognitionManagerImpl::NotFeasible(const Session& session,
                                               FSMEvent event) {
  NOTREACHED() << "Unfeasible event " << event
               << " in state " << GetSessionState(session.id)
               << " for session " << session.id;
}

int SpeechRecognitionManagerImpl::GetNextSessionID() {
  ++last_session_id_;
  // Deal with wrapping of last_session_id_.
  if (last_session_id_ <= 0)
    last_session_id_ = 1;
  return last_session_id_;
}

bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const {
  return sessions_.find(session_id) != sessions_.end();
}

SpeechRecognitionManagerImpl::Session*
SpeechRecognitionManagerImpl::GetSession(int session_id) const {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  SessionsTable::const_iterator iter = sessions_.find(session_id);
  DCHECK(iter != sessions_.end());
  return iter->second;
}

SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener(
    int session_id) const {
  Session* session = GetSession(session_id);
  if (session->listener_is_active && session->config.event_listener)
    return session->config.event_listener.get();
  return NULL;
}

SpeechRecognitionEventListener*
SpeechRecognitionManagerImpl::GetDelegateListener() const {
  return delegate_.get() ? delegate_->GetEventListener() : NULL;
}

const SpeechRecognitionSessionConfig&
SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const {
  return GetSession(session_id)->config;
}

bool SpeechRecognitionManagerImpl::HasAudioInputDevices() {
  return audio_manager_->HasAudioInputDevices();
}

base::string16 SpeechRecognitionManagerImpl::GetAudioInputDeviceModel() {
  return audio_manager_->GetAudioInputDeviceModel();
}

void SpeechRecognitionManagerImpl::ShowAudioInputSettings() {
  // Since AudioManager::ShowAudioInputSettings can potentially launch external
  // processes, do that on the FILE thread so the calling threads are not
  // blocked.
  BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE,
                          base::Bind(&ShowAudioInputSettingsOnFileThread,
                                     audio_manager_));
}

SpeechRecognitionManagerImpl::Session::Session()
    : id(kSessionIDInvalid),
      abort_requested(false),
      listener_is_active(true) {
}

SpeechRecognitionManagerImpl::Session::~Session() {
}

}  // namespace content