// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/speech/speech_recognition_manager_impl.h"

#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "content/browser/browser_main_loop.h"
#include "content/browser/renderer_host/media/media_stream_manager.h"
#include "content/browser/renderer_host/media/media_stream_ui_proxy.h"
#include "content/browser/speech/google_one_shot_remote_engine.h"
#include "content/browser/speech/google_streaming_remote_engine.h"
#include "content/browser/speech/speech_recognition_engine.h"
#include "content/browser/speech/speech_recognizer_impl.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/content_browser_client.h"
#include "content/public/browser/resource_context.h"
#include "content/public/browser/speech_recognition_event_listener.h"
#include "content/public/browser/speech_recognition_manager_delegate.h"
#include "content/public/browser/speech_recognition_session_config.h"
#include "content/public/browser/speech_recognition_session_context.h"
#include "content/public/common/speech_recognition_error.h"
#include "content/public/common/speech_recognition_result.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"

#if defined(OS_ANDROID)
#include "content/browser/speech/speech_recognizer_impl_android.h"
#endif

namespace content {

SpeechRecognitionManager* SpeechRecognitionManager::manager_for_tests_;

namespace {

SpeechRecognitionManagerImpl* g_speech_recognition_manager_impl;

void ShowAudioInputSettingsOnFileThread(media::AudioManager* audio_manager) {
  DCHECK_CURRENTLY_ON(BrowserThread::FILE);
  audio_manager->ShowAudioInputSettings();
}

}  // namespace

SpeechRecognitionManager* SpeechRecognitionManager::GetInstance() {
  if (manager_for_tests_)
    return manager_for_tests_;
  return SpeechRecognitionManagerImpl::GetInstance();
}

void SpeechRecognitionManager::SetManagerForTesting(
    SpeechRecognitionManager* manager) {
  manager_for_tests_ = manager;
}

SpeechRecognitionManagerImpl* SpeechRecognitionManagerImpl::GetInstance() {
  return g_speech_recognition_manager_impl;
}

SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl(
    media::AudioManager* audio_manager,
    MediaStreamManager* media_stream_manager)
    : audio_manager_(audio_manager),
      media_stream_manager_(media_stream_manager),
      primary_session_id_(kSessionIDInvalid),
      last_session_id_(kSessionIDInvalid),
      is_dispatching_event_(false),
      delegate_(GetContentClient()->browser()->
                    CreateSpeechRecognitionManagerDelegate()),
      weak_factory_(this) {
  DCHECK(!g_speech_recognition_manager_impl);
  g_speech_recognition_manager_impl = this;
}

SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() {
  DCHECK(g_speech_recognition_manager_impl);
  g_speech_recognition_manager_impl = NULL;

  for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
       ++it) {
    // MediaStreamUIProxy must be deleted on the IO thread.
    BrowserThread::DeleteSoon(BrowserThread::IO, FROM_HERE,
                              it->second->ui.release());
    delete it->second;
  }
  sessions_.clear();
}
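
// CreateSession() only registers the session and configures its recognition
// engine; no audio is captured and no permission is requested until
// StartSession() is called for the returned session ID.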
int SpeechRecognitionManagerImpl::CreateSession(
    const SpeechRecognitionSessionConfig& config) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);

  const int session_id = GetNextSessionID();
  DCHECK(!SessionExists(session_id));
  // Set up the new session.
  Session* session = new Session();
  sessions_[session_id] = session;
  session->id = session_id;
  session->config = config;
  session->context = config.initial_context;

  std::string hardware_info;
  bool can_report_metrics = false;
  if (delegate_)
    delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info);

  // The legacy API cannot use continuous mode.
  DCHECK(!config.is_legacy_api || !config.continuous);

#if !defined(OS_ANDROID)
  // A SpeechRecognitionEngine (and corresponding Config) is required only
  // when using SpeechRecognizerImpl, which performs the audio capture and
  // endpointing in the browser. This is not the case on Android, where not
  // only the speech recognition but also the audio capture and endpointing
  // activities are performed outside of the browser (delegated via JNI to
  // the Android API implementation).

  SpeechRecognitionEngineConfig remote_engine_config;
  remote_engine_config.language = config.language;
  remote_engine_config.grammars = config.grammars;
  remote_engine_config.audio_sample_rate =
      SpeechRecognizerImpl::kAudioSampleRate;
  remote_engine_config.audio_num_bits_per_sample =
      SpeechRecognizerImpl::kNumBitsPerAudioSample;
  remote_engine_config.filter_profanities = config.filter_profanities;
  remote_engine_config.continuous = config.continuous;
  remote_engine_config.interim_results = config.interim_results;
  remote_engine_config.max_hypotheses = config.max_hypotheses;
  remote_engine_config.hardware_info = hardware_info;
  remote_engine_config.origin_url = config.origin_url;
  remote_engine_config.auth_token = config.auth_token;
  remote_engine_config.auth_scope = config.auth_scope;
  remote_engine_config.preamble = config.preamble;
  SpeechRecognitionEngine* google_remote_engine;
  if (config.is_legacy_api) {
    google_remote_engine =
        new GoogleOneShotRemoteEngine(config.url_request_context_getter.get());
  } else {
    google_remote_engine = new GoogleStreamingRemoteEngine(
        config.url_request_context_getter.get());
  }

  google_remote_engine->SetConfig(remote_engine_config);

  session->recognizer = new SpeechRecognizerImpl(
      this,
      session_id,
      config.continuous,
      config.interim_results,
      google_remote_engine);
#else
  session->recognizer = new SpeechRecognizerImplAndroid(this, session_id);
#endif
  return session_id;
}
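
// StartSession() actually kicks off a session created above: it aborts any
// other active (primary) session, then asks the delegate whether recognition
// is allowed before any EVENT_START is dispatched to the FSM.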
void SpeechRecognitionManagerImpl::StartSession(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  // If there is another active session, abort that.
  if (primary_session_id_ != kSessionIDInvalid &&
      primary_session_id_ != session_id) {
    AbortSession(primary_session_id_);
  }

  primary_session_id_ = session_id;

  if (delegate_) {
    delegate_->CheckRecognitionIsAllowed(
        session_id,
        base::Bind(&SpeechRecognitionManagerImpl::RecognitionAllowedCallback,
                   weak_factory_.GetWeakPtr(),
                   session_id));
  }
}

void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id,
                                                              bool ask_user,
                                                              bool is_allowed) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  SessionsTable::iterator iter = sessions_.find(session_id);
  DCHECK(iter != sessions_.end());
  Session* session = iter->second;

  if (session->abort_requested)
    return;

  if (ask_user) {
    SpeechRecognitionSessionContext& context = session->context;
    context.label = media_stream_manager_->MakeMediaAccessRequest(
        context.render_process_id,
        context.render_frame_id,
        context.request_id,
        StreamOptions(true, false),
        GURL(context.context_name),
        base::Bind(
            &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback,
            weak_factory_.GetWeakPtr(), session_id));
    return;
  }

  if (is_allowed) {
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE,
        base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                   weak_factory_.GetWeakPtr(), session_id, EVENT_START));
  } else {
    OnRecognitionError(session_id, SpeechRecognitionError(
        SPEECH_RECOGNITION_ERROR_NOT_ALLOWED));
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE,
        base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                   weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT));
  }
}
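
// MediaRequestPermissionCallback() is the callback bound into
// MakeMediaAccessRequest() above; it records the approved capture device (if
// any) and then re-enters RecognitionAllowedCallback() with ask_user=false to
// resume the normal allow/deny flow.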
void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback(
    int session_id,
    const MediaStreamDevices& devices,
    scoped_ptr<MediaStreamUIProxy> stream_ui) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);

  SessionsTable::iterator iter = sessions_.find(session_id);
  if (iter == sessions_.end())
    return;

  bool is_allowed = !devices.empty();
  if (is_allowed) {
    // Copy the approved devices array to the context for UI indication.
    iter->second->context.devices = devices;

    // Save the UI object.
    iter->second->ui = stream_ui.Pass();
  }

  // Clear the label to indicate the request has been done.
  iter->second->context.label.clear();

  // Notify the recognition about the request result.
  RecognitionAllowedCallback(iter->first, false, is_allowed);
}

void SpeechRecognitionManagerImpl::AbortSession(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  SessionsTable::iterator iter = sessions_.find(session_id);
  iter->second->ui.reset();

  if (iter->second->abort_requested)
    return;

  iter->second->abort_requested = true;

  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT));
}

void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  SessionsTable::iterator iter = sessions_.find(session_id);
  iter->second->ui.reset();

  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                 weak_factory_.GetWeakPtr(), session_id, EVENT_STOP_CAPTURE));
}

// Here begins the SpeechRecognitionEventListener interface implementation,
// which simply relays the events to the proper listener registered for the
// particular session and to the catch-all listener provided by the delegate
// (if any).
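
// Every handler below first checks SessionExists(), so events that arrive
// after a session has already been deleted are silently dropped.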
void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  SessionsTable::iterator iter = sessions_.find(session_id);
  if (iter->second->ui) {
    // Notify the UI that the devices are being used.
    iter->second->ui->OnStarted(base::Closure(),
                                MediaStreamUIProxy::WindowIdCallback());
  }

  DCHECK_EQ(primary_session_id_, session_id);
  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnRecognitionStart(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnRecognitionStart(session_id);
}

void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  DCHECK_EQ(primary_session_id_, session_id);
  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnAudioStart(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnAudioStart(session_id);
}

void SpeechRecognitionManagerImpl::OnEnvironmentEstimationComplete(
    int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  DCHECK_EQ(primary_session_id_, session_id);
  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnEnvironmentEstimationComplete(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnEnvironmentEstimationComplete(session_id);
}

void SpeechRecognitionManagerImpl::OnSoundStart(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  DCHECK_EQ(primary_session_id_, session_id);
  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnSoundStart(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnSoundStart(session_id);
}

void SpeechRecognitionManagerImpl::OnSoundEnd(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnSoundEnd(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnSoundEnd(session_id);
}

void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnAudioEnd(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnAudioEnd(session_id);
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                 weak_factory_.GetWeakPtr(), session_id, EVENT_AUDIO_ENDED));
}

void SpeechRecognitionManagerImpl::OnRecognitionResults(
    int session_id, const SpeechRecognitionResults& results) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnRecognitionResults(session_id, results);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnRecognitionResults(session_id, results);
}

void SpeechRecognitionManagerImpl::OnRecognitionError(
    int session_id, const SpeechRecognitionError& error) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnRecognitionError(session_id, error);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnRecognitionError(session_id, error);
}

void SpeechRecognitionManagerImpl::OnAudioLevelsChange(
    int session_id, float volume, float noise_volume) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnAudioLevelsChange(session_id, volume, noise_volume);
}

void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  if (!SessionExists(session_id))
    return;

  if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
    delegate_listener->OnRecognitionEnd(session_id);
  if (SpeechRecognitionEventListener* listener = GetListener(session_id))
    listener->OnRecognitionEnd(session_id);
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent,
                            weak_factory_.GetWeakPtr(), session_id,
                            EVENT_RECOGNITION_ENDED));
}

int SpeechRecognitionManagerImpl::GetSession(
    int render_process_id, int render_view_id, int request_id) const {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  SessionsTable::const_iterator iter;
  for (iter = sessions_.begin(); iter != sessions_.end(); ++iter) {
    const int session_id = iter->first;
    const SpeechRecognitionSessionContext& context = iter->second->context;
    if (context.render_process_id == render_process_id &&
        context.render_view_id == render_view_id &&
        context.request_id == request_id) {
      return session_id;
    }
  }
  return kSessionIDInvalid;
}

SpeechRecognitionSessionContext
SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const {
  return GetSession(session_id)->context;
}

void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderProcess(
    int render_process_id) {
  // This method gracefully destroys sessions for the listener. However, since
  // the listener itself is likely to be destroyed after this call, we avoid
  // dispatching further events to it by clearing the |listener_is_active|
  // flag.
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
       ++it) {
    Session* session = it->second;
    if (session->context.render_process_id == render_process_id) {
      AbortSession(session->id);
      session->listener_is_active = false;
    }
  }
}

void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView(
    int render_process_id,
    int render_view_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
       ++it) {
    Session* session = it->second;
    if (session->context.render_process_id == render_process_id &&
        session->context.render_view_id == render_view_id) {
      AbortSession(session->id);
    }
  }
}

// ----------------------- Core FSM implementation ---------------------------
void SpeechRecognitionManagerImpl::DispatchEvent(int session_id,
                                                 FSMEvent event) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);

  // There are some corner cases in which the session might be deleted (due to
  // an EndRecognition event) between a request (e.g. Abort) and its dispatch.
  if (!SessionExists(session_id))
    return;

  Session* session = GetSession(session_id);
  FSMState session_state = GetSessionState(session_id);
  DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE);
  DCHECK_LE(event, EVENT_MAX_VALUE);

  // Event dispatching must be sequential, otherwise it will break all the
  // rules and assumptions of the finite state automaton model.
  DCHECK(!is_dispatching_event_);
  is_dispatching_event_ = true;
  ExecuteTransitionAndGetNextState(session, session_state, event);
  is_dispatching_event_ = false;
}
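
// Typical happy-path flow, as handled by the FSM below: EVENT_START (in
// SESSION_STATE_IDLE) starts recognition; EVENT_AUDIO_ENDED is then observed
// in SESSION_STATE_WAITING_FOR_RESULT and clears |primary_session_id_|;
// finally EVENT_RECOGNITION_ENDED is observed back in SESSION_STATE_IDLE and
// deletes the session.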
// This FSM handles the evolution of each session, from the viewpoint of the
// interaction with the user (which may be either the browser end-user, who
// interacts with the UI bubbles, or the JS developer, who interacts with the
// JS methods). All the events received by the SpeechRecognizer instances (one
// for each session) are always routed to the SpeechRecognitionEventListener(s)
// regardless of the choices taken in this FSM.
void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState(
    Session* session, FSMState session_state, FSMEvent event) {
  // Note: since we're not tracking the state of the recognizer object, rather
  // we're directly retrieving it (through GetSessionState), we see its events
  // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution
  // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just
  // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus
  // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT).
  // This makes the code below a bit tricky but avoids a lot of code for
  // tracking and reconstructing asynchronously the state of the recognizer.
  switch (session_state) {
    case SESSION_STATE_IDLE:
      switch (event) {
        case EVENT_START:
          return SessionStart(*session);
        case EVENT_ABORT:
          return SessionAbort(*session);
        case EVENT_RECOGNITION_ENDED:
          return SessionDelete(session);
        case EVENT_STOP_CAPTURE:
          return SessionStopAudioCapture(*session);
        case EVENT_AUDIO_ENDED:
          return;
      }
      break;
    case SESSION_STATE_CAPTURING_AUDIO:
      switch (event) {
        case EVENT_STOP_CAPTURE:
          return SessionStopAudioCapture(*session);
        case EVENT_ABORT:
          return SessionAbort(*session);
        case EVENT_START:
          return;
        case EVENT_AUDIO_ENDED:
        case EVENT_RECOGNITION_ENDED:
          return NotFeasible(*session, event);
      }
      break;
    case SESSION_STATE_WAITING_FOR_RESULT:
      switch (event) {
        case EVENT_ABORT:
          return SessionAbort(*session);
        case EVENT_AUDIO_ENDED:
          return ResetCapturingSessionId(*session);
        case EVENT_START:
        case EVENT_STOP_CAPTURE:
          return;
        case EVENT_RECOGNITION_ENDED:
          return NotFeasible(*session, event);
      }
      break;
  }
  return NotFeasible(*session, event);
}
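
// Maps the recognizer's current activity onto the three coarse FSM states used
// above: idle, capturing audio, or waiting for the final result.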
SpeechRecognitionManagerImpl::FSMState
SpeechRecognitionManagerImpl::GetSessionState(int session_id) const {
  Session* session = GetSession(session_id);
  if (!session->recognizer.get() || !session->recognizer->IsActive())
    return SESSION_STATE_IDLE;
  if (session->recognizer->IsCapturingAudio())
    return SESSION_STATE_CAPTURING_AUDIO;
  return SESSION_STATE_WAITING_FOR_RESULT;
}

// ----------- Contract for all the FSM evolution functions below -------------
//  - They are guaranteed to be executed on the IO thread;
//  - They are guaranteed not to be reentrant (with themselves or each other).

void SpeechRecognitionManagerImpl::SessionStart(const Session& session) {
  DCHECK_EQ(primary_session_id_, session.id);
  const MediaStreamDevices& devices = session.context.devices;
  std::string device_id;
  if (devices.empty()) {
    // From the ask_user=false path, use the default device.
    // TODO(xians): Abort the session once this path no longer needs to be
    // supported.
    device_id = media::AudioManagerBase::kDefaultDeviceId;
  } else {
    // From the ask_user=true path, use the selected device.
    DCHECK_EQ(1u, devices.size());
    DCHECK_EQ(MEDIA_DEVICE_AUDIO_CAPTURE, devices.front().type);
    device_id = devices.front().id;
  }

  session.recognizer->StartRecognition(device_id);
}

void SpeechRecognitionManagerImpl::SessionAbort(const Session& session) {
  if (primary_session_id_ == session.id)
    primary_session_id_ = kSessionIDInvalid;
  DCHECK(session.recognizer.get());
  session.recognizer->AbortRecognition();
}

void SpeechRecognitionManagerImpl::SessionStopAudioCapture(
    const Session& session) {
  DCHECK(session.recognizer.get());
  session.recognizer->StopAudioCapture();
}

void SpeechRecognitionManagerImpl::ResetCapturingSessionId(
    const Session& session) {
  DCHECK_EQ(primary_session_id_, session.id);
  primary_session_id_ = kSessionIDInvalid;
}

void SpeechRecognitionManagerImpl::SessionDelete(Session* session) {
  DCHECK(session->recognizer.get() == NULL || !session->recognizer->IsActive());
  if (primary_session_id_ == session->id)
    primary_session_id_ = kSessionIDInvalid;
  if (!session->context.label.empty())
    media_stream_manager_->CancelRequest(session->context.label);
  sessions_.erase(session->id);
  delete session;
}
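
// NotFeasible() is the catch-all for (state, event) combinations that should
// never occur; hitting it indicates a bug in the FSM transitions above.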
void SpeechRecognitionManagerImpl::NotFeasible(const Session& session,
                                               FSMEvent event) {
  NOTREACHED() << "Unfeasible event " << event
               << " in state " << GetSessionState(session.id)
               << " for session " << session.id;
}

int SpeechRecognitionManagerImpl::GetNextSessionID() {
  ++last_session_id_;
  // Deal with wrapping of last_session_id_. (How civilized.)
  if (last_session_id_ <= 0)
    last_session_id_ = 1;
  return last_session_id_;
}

bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const {
  return sessions_.find(session_id) != sessions_.end();
}

SpeechRecognitionManagerImpl::Session*
SpeechRecognitionManagerImpl::GetSession(int session_id) const {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  SessionsTable::const_iterator iter = sessions_.find(session_id);
  DCHECK(iter != sessions_.end());
  return iter->second;
}

SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener(
    int session_id) const {
  Session* session = GetSession(session_id);
  if (session->listener_is_active && session->config.event_listener)
    return session->config.event_listener.get();
  return NULL;
}

SpeechRecognitionEventListener*
SpeechRecognitionManagerImpl::GetDelegateListener() const {
  return delegate_.get() ? delegate_->GetEventListener() : NULL;
}

const SpeechRecognitionSessionConfig&
SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const {
  return GetSession(session_id)->config;
}

bool SpeechRecognitionManagerImpl::HasAudioInputDevices() {
  return audio_manager_->HasAudioInputDevices();
}

base::string16 SpeechRecognitionManagerImpl::GetAudioInputDeviceModel() {
  return audio_manager_->GetAudioInputDeviceModel();
}

void SpeechRecognitionManagerImpl::ShowAudioInputSettings() {
  // Since AudioManager::ShowAudioInputSettings can potentially launch external
  // processes, do that on the FILE thread so we do not block the calling
  // threads.
  BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE,
                          base::Bind(&ShowAudioInputSettingsOnFileThread,
                                     audio_manager_));
}

SpeechRecognitionManagerImpl::Session::Session()
    : id(kSessionIDInvalid),
      abort_requested(false),
      listener_is_active(true) {
}

SpeechRecognitionManagerImpl::Session::~Session() {
}

}  // namespace content