1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/browser/speech/speech_recognition_manager_impl.h"
8 #include "content/browser/browser_main_loop.h"
9 #include "content/browser/renderer_host/media/media_stream_manager.h"
10 #include "content/browser/renderer_host/media/media_stream_ui_proxy.h"
11 #include "content/browser/speech/google_one_shot_remote_engine.h"
12 #include "content/browser/speech/google_streaming_remote_engine.h"
13 #include "content/browser/speech/speech_recognition_engine.h"
14 #include "content/browser/speech/speech_recognizer_impl.h"
15 #include "content/public/browser/browser_thread.h"
16 #include "content/public/browser/content_browser_client.h"
17 #include "content/public/browser/resource_context.h"
18 #include "content/public/browser/speech_recognition_event_listener.h"
19 #include "content/public/browser/speech_recognition_manager_delegate.h"
20 #include "content/public/browser/speech_recognition_session_config.h"
21 #include "content/public/browser/speech_recognition_session_context.h"
22 #include "content/public/common/speech_recognition_error.h"
23 #include "content/public/common/speech_recognition_result.h"
24 #include "media/audio/audio_manager.h"
25 #include "media/audio/audio_manager_base.h"
27 #if defined(OS_ANDROID)
28 #include "content/browser/speech/speech_recognizer_impl_android.h"
35 SpeechRecognitionManager
* SpeechRecognitionManager::manager_for_tests_
;
39 SpeechRecognitionManagerImpl
* g_speech_recognition_manager_impl
;
41 void ShowAudioInputSettingsOnFileThread(media::AudioManager
* audio_manager
) {
42 DCHECK_CURRENTLY_ON(BrowserThread::FILE);
43 audio_manager
->ShowAudioInputSettings();
48 SpeechRecognitionManager
* SpeechRecognitionManager::GetInstance() {
49 if (manager_for_tests_
)
50 return manager_for_tests_
;
51 return SpeechRecognitionManagerImpl::GetInstance();
54 void SpeechRecognitionManager::SetManagerForTesting(
55 SpeechRecognitionManager
* manager
) {
56 manager_for_tests_
= manager
;
59 SpeechRecognitionManagerImpl
* SpeechRecognitionManagerImpl::GetInstance() {
60 return g_speech_recognition_manager_impl
;
63 SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl(
64 media::AudioManager
* audio_manager
,
65 MediaStreamManager
* media_stream_manager
)
66 : audio_manager_(audio_manager
),
67 media_stream_manager_(media_stream_manager
),
68 primary_session_id_(kSessionIDInvalid
),
69 last_session_id_(kSessionIDInvalid
),
70 is_dispatching_event_(false),
71 delegate_(GetContentClient()->browser()->
72 CreateSpeechRecognitionManagerDelegate()),
74 DCHECK(!g_speech_recognition_manager_impl
);
75 g_speech_recognition_manager_impl
= this;
78 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() {
79 DCHECK(g_speech_recognition_manager_impl
);
80 g_speech_recognition_manager_impl
= NULL
;
82 for (SessionsTable::iterator it
= sessions_
.begin(); it
!= sessions_
.end();
84 // MediaStreamUIProxy must be deleted on the IO thread.
85 BrowserThread::DeleteSoon(BrowserThread::IO
, FROM_HERE
,
86 it
->second
->ui
.release());
92 int SpeechRecognitionManagerImpl::CreateSession(
93 const SpeechRecognitionSessionConfig
& config
) {
94 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
96 const int session_id
= GetNextSessionID();
97 DCHECK(!SessionExists(session_id
));
98 // Set-up the new session.
99 Session
* session
= new Session();
100 sessions_
[session_id
] = session
;
101 session
->id
= session_id
;
102 session
->config
= config
;
103 session
->context
= config
.initial_context
;
105 std::string hardware_info
;
106 bool can_report_metrics
= false;
108 delegate_
->GetDiagnosticInformation(&can_report_metrics
, &hardware_info
);
110 // The legacy api cannot use continuous mode.
111 DCHECK(!config
.is_legacy_api
|| !config
.continuous
);
113 #if !defined(OS_ANDROID)
114 // A SpeechRecognitionEngine (and corresponding Config) is required only
115 // when using SpeechRecognizerImpl, which performs the audio capture and
116 // endpointing in the browser. This is not the case of Android where, not
117 // only the speech recognition, but also the audio capture and endpointing
118 // activities performed outside of the browser (delegated via JNI to the
119 // Android API implementation).
121 SpeechRecognitionEngineConfig remote_engine_config
;
122 remote_engine_config
.language
= config
.language
;
123 remote_engine_config
.grammars
= config
.grammars
;
124 remote_engine_config
.audio_sample_rate
=
125 SpeechRecognizerImpl::kAudioSampleRate
;
126 remote_engine_config
.audio_num_bits_per_sample
=
127 SpeechRecognizerImpl::kNumBitsPerAudioSample
;
128 remote_engine_config
.filter_profanities
= config
.filter_profanities
;
129 remote_engine_config
.continuous
= config
.continuous
;
130 remote_engine_config
.interim_results
= config
.interim_results
;
131 remote_engine_config
.max_hypotheses
= config
.max_hypotheses
;
132 remote_engine_config
.hardware_info
= hardware_info
;
133 remote_engine_config
.origin_url
=
134 can_report_metrics
? config
.origin_url
: std::string();
135 remote_engine_config
.auth_token
= config
.auth_token
;
136 remote_engine_config
.auth_scope
= config
.auth_scope
;
137 remote_engine_config
.preamble
= config
.preamble
;
139 SpeechRecognitionEngine
* google_remote_engine
;
140 if (config
.is_legacy_api
) {
141 google_remote_engine
=
142 new GoogleOneShotRemoteEngine(config
.url_request_context_getter
.get());
144 google_remote_engine
= new GoogleStreamingRemoteEngine(
145 config
.url_request_context_getter
.get());
148 google_remote_engine
->SetConfig(remote_engine_config
);
150 session
->recognizer
= new SpeechRecognizerImpl(
154 config
.interim_results
,
155 google_remote_engine
);
157 session
->recognizer
= new SpeechRecognizerImplAndroid(this, session_id
);
162 void SpeechRecognitionManagerImpl::StartSession(int session_id
) {
163 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
164 if (!SessionExists(session_id
))
167 // If there is another active session, abort that.
168 if (primary_session_id_
!= kSessionIDInvalid
&&
169 primary_session_id_
!= session_id
) {
170 AbortSession(primary_session_id_
);
173 primary_session_id_
= session_id
;
176 delegate_
->CheckRecognitionIsAllowed(
178 base::Bind(&SpeechRecognitionManagerImpl::RecognitionAllowedCallback
,
179 weak_factory_
.GetWeakPtr(),
184 void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id
,
187 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
188 if (!SessionExists(session_id
))
191 SessionsTable::iterator iter
= sessions_
.find(session_id
);
192 DCHECK(iter
!= sessions_
.end());
193 Session
* session
= iter
->second
;
195 if (session
->abort_requested
)
199 SpeechRecognitionSessionContext
& context
= session
->context
;
200 context
.label
= media_stream_manager_
->MakeMediaAccessRequest(
201 context
.render_process_id
,
202 context
.render_frame_id
,
204 StreamOptions(true, false),
205 GURL(context
.context_name
),
207 &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback
,
208 weak_factory_
.GetWeakPtr(), session_id
));
213 base::MessageLoop::current()->PostTask(
215 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent
,
216 weak_factory_
.GetWeakPtr(),
220 OnRecognitionError(session_id
, SpeechRecognitionError(
221 SPEECH_RECOGNITION_ERROR_NOT_ALLOWED
));
222 base::MessageLoop::current()->PostTask(
224 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent
,
225 weak_factory_
.GetWeakPtr(),
231 void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback(
233 const MediaStreamDevices
& devices
,
234 scoped_ptr
<MediaStreamUIProxy
> stream_ui
) {
235 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
237 SessionsTable::iterator iter
= sessions_
.find(session_id
);
238 if (iter
== sessions_
.end())
241 bool is_allowed
= !devices
.empty();
243 // Copy the approved devices array to the context for UI indication.
244 iter
->second
->context
.devices
= devices
;
246 // Save the UI object.
247 iter
->second
->ui
= stream_ui
.Pass();
250 // Clear the label to indicate the request has been done.
251 iter
->second
->context
.label
.clear();
253 // Notify the recognition about the request result.
254 RecognitionAllowedCallback(iter
->first
, false, is_allowed
);
257 void SpeechRecognitionManagerImpl::AbortSession(int session_id
) {
258 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
259 if (!SessionExists(session_id
))
262 SessionsTable::iterator iter
= sessions_
.find(session_id
);
263 iter
->second
->ui
.reset();
265 if (iter
->second
->abort_requested
)
268 iter
->second
->abort_requested
= true;
270 base::MessageLoop::current()->PostTask(
272 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent
,
273 weak_factory_
.GetWeakPtr(),
278 void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id
) {
279 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
280 if (!SessionExists(session_id
))
283 SessionsTable::iterator iter
= sessions_
.find(session_id
);
284 iter
->second
->ui
.reset();
286 base::MessageLoop::current()->PostTask(
288 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent
,
289 weak_factory_
.GetWeakPtr(),
291 EVENT_STOP_CAPTURE
));
// Here begins the SpeechRecognitionEventListener interface implementation,
// which will simply relay the events to the proper listener registered for the
// particular session and to the catch-all listener provided by the delegate.
299 void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id
) {
300 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
301 if (!SessionExists(session_id
))
304 SessionsTable::iterator iter
= sessions_
.find(session_id
);
305 if (iter
->second
->ui
) {
306 // Notify the UI that the devices are being used.
307 iter
->second
->ui
->OnStarted(base::Closure(),
308 MediaStreamUIProxy::WindowIdCallback());
311 DCHECK_EQ(primary_session_id_
, session_id
);
312 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
313 delegate_listener
->OnRecognitionStart(session_id
);
314 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
315 listener
->OnRecognitionStart(session_id
);
318 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id
) {
319 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
320 if (!SessionExists(session_id
))
323 DCHECK_EQ(primary_session_id_
, session_id
);
324 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
325 delegate_listener
->OnAudioStart(session_id
);
326 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
327 listener
->OnAudioStart(session_id
);
330 void SpeechRecognitionManagerImpl::OnEnvironmentEstimationComplete(
332 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
333 if (!SessionExists(session_id
))
336 DCHECK_EQ(primary_session_id_
, session_id
);
337 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
338 delegate_listener
->OnEnvironmentEstimationComplete(session_id
);
339 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
340 listener
->OnEnvironmentEstimationComplete(session_id
);
343 void SpeechRecognitionManagerImpl::OnSoundStart(int session_id
) {
344 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
345 if (!SessionExists(session_id
))
348 DCHECK_EQ(primary_session_id_
, session_id
);
349 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
350 delegate_listener
->OnSoundStart(session_id
);
351 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
352 listener
->OnSoundStart(session_id
);
355 void SpeechRecognitionManagerImpl::OnSoundEnd(int session_id
) {
356 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
357 if (!SessionExists(session_id
))
360 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
361 delegate_listener
->OnSoundEnd(session_id
);
362 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
363 listener
->OnSoundEnd(session_id
);
366 void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id
) {
367 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
368 if (!SessionExists(session_id
))
371 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
372 delegate_listener
->OnAudioEnd(session_id
);
373 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
374 listener
->OnAudioEnd(session_id
);
375 base::MessageLoop::current()->PostTask(
377 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent
,
378 weak_factory_
.GetWeakPtr(),
383 void SpeechRecognitionManagerImpl::OnRecognitionResults(
384 int session_id
, const SpeechRecognitionResults
& results
) {
385 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
386 if (!SessionExists(session_id
))
389 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
390 delegate_listener
->OnRecognitionResults(session_id
, results
);
391 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
392 listener
->OnRecognitionResults(session_id
, results
);
395 void SpeechRecognitionManagerImpl::OnRecognitionError(
396 int session_id
, const SpeechRecognitionError
& error
) {
397 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
398 if (!SessionExists(session_id
))
401 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
402 delegate_listener
->OnRecognitionError(session_id
, error
);
403 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
404 listener
->OnRecognitionError(session_id
, error
);
407 void SpeechRecognitionManagerImpl::OnAudioLevelsChange(
408 int session_id
, float volume
, float noise_volume
) {
409 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
410 if (!SessionExists(session_id
))
413 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
414 delegate_listener
->OnAudioLevelsChange(session_id
, volume
, noise_volume
);
415 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
416 listener
->OnAudioLevelsChange(session_id
, volume
, noise_volume
);
419 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id
) {
420 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
421 if (!SessionExists(session_id
))
424 if (SpeechRecognitionEventListener
* delegate_listener
= GetDelegateListener())
425 delegate_listener
->OnRecognitionEnd(session_id
);
426 if (SpeechRecognitionEventListener
* listener
= GetListener(session_id
))
427 listener
->OnRecognitionEnd(session_id
);
428 base::MessageLoop::current()->PostTask(
430 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent
,
431 weak_factory_
.GetWeakPtr(),
433 EVENT_RECOGNITION_ENDED
));
436 int SpeechRecognitionManagerImpl::GetSession(
437 int render_process_id
, int render_view_id
, int request_id
) const {
438 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
439 SessionsTable::const_iterator iter
;
440 for (iter
= sessions_
.begin(); iter
!= sessions_
.end(); ++iter
) {
441 const int session_id
= iter
->first
;
442 const SpeechRecognitionSessionContext
& context
= iter
->second
->context
;
443 if (context
.render_process_id
== render_process_id
&&
444 context
.render_view_id
== render_view_id
&&
445 context
.request_id
== request_id
) {
449 return kSessionIDInvalid
;
452 SpeechRecognitionSessionContext
453 SpeechRecognitionManagerImpl::GetSessionContext(int session_id
) const {
454 return GetSession(session_id
)->context
;
457 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderProcess(
458 int render_process_id
) {
459 // This method gracefully destroys sessions for the listener. However, since
460 // the listener itself is likely to be destroyed after this call, we avoid
461 // dispatching further events to it, marking the |listener_is_active| flag.
462 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
463 for (SessionsTable::iterator it
= sessions_
.begin(); it
!= sessions_
.end();
465 Session
* session
= it
->second
;
466 if (session
->context
.render_process_id
== render_process_id
) {
467 AbortSession(session
->id
);
468 session
->listener_is_active
= false;
473 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView(
474 int render_process_id
,
475 int render_view_id
) {
476 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
477 for (SessionsTable::iterator it
= sessions_
.begin(); it
!= sessions_
.end();
479 Session
* session
= it
->second
;
480 if (session
->context
.render_process_id
== render_process_id
&&
481 session
->context
.render_view_id
== render_view_id
) {
482 AbortSession(session
->id
);
487 // ----------------------- Core FSM implementation ---------------------------
488 void SpeechRecognitionManagerImpl::DispatchEvent(int session_id
,
490 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
492 // There are some corner cases in which the session might be deleted (due to
493 // an EndRecognition event) between a request (e.g. Abort) and its dispatch.
494 if (!SessionExists(session_id
))
497 Session
* session
= GetSession(session_id
);
498 FSMState session_state
= GetSessionState(session_id
);
499 DCHECK_LE(session_state
, SESSION_STATE_MAX_VALUE
);
500 DCHECK_LE(event
, EVENT_MAX_VALUE
);
502 // Event dispatching must be sequential, otherwise it will break all the rules
503 // and the assumptions of the finite state automata model.
504 DCHECK(!is_dispatching_event_
);
505 is_dispatching_event_
= true;
506 ExecuteTransitionAndGetNextState(session
, session_state
, event
);
507 is_dispatching_event_
= false;
510 // This FSM handles the evolution of each session, from the viewpoint of the
511 // interaction with the user (that may be either the browser end-user which
512 // interacts with UI bubbles, or JS developer intracting with JS methods).
513 // All the events received by the SpeechRecognizer instances (one for each
514 // session) are always routed to the SpeechRecognitionEventListener(s)
515 // regardless the choices taken in this FSM.
516 void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState(
517 Session
* session
, FSMState session_state
, FSMEvent event
) {
518 // Note: since we're not tracking the state of the recognizer object, rather
519 // we're directly retrieving it (through GetSessionState), we see its events
520 // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution
521 // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just
522 // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus
523 // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT).
524 // This makes the code below a bit tricky but avoids a lot of code for
525 // tracking and reconstructing asynchronously the state of the recognizer.
526 switch (session_state
) {
527 case SESSION_STATE_IDLE
:
530 return SessionStart(*session
);
532 return SessionAbort(*session
);
533 case EVENT_RECOGNITION_ENDED
:
534 return SessionDelete(session
);
535 case EVENT_STOP_CAPTURE
:
536 return SessionStopAudioCapture(*session
);
537 case EVENT_AUDIO_ENDED
:
541 case SESSION_STATE_CAPTURING_AUDIO
:
543 case EVENT_STOP_CAPTURE
:
544 return SessionStopAudioCapture(*session
);
546 return SessionAbort(*session
);
549 case EVENT_AUDIO_ENDED
:
550 case EVENT_RECOGNITION_ENDED
:
551 return NotFeasible(*session
, event
);
554 case SESSION_STATE_WAITING_FOR_RESULT
:
557 return SessionAbort(*session
);
558 case EVENT_AUDIO_ENDED
:
559 return ResetCapturingSessionId(*session
);
561 case EVENT_STOP_CAPTURE
:
563 case EVENT_RECOGNITION_ENDED
:
564 return NotFeasible(*session
, event
);
568 return NotFeasible(*session
, event
);
571 SpeechRecognitionManagerImpl::FSMState
572 SpeechRecognitionManagerImpl::GetSessionState(int session_id
) const {
573 Session
* session
= GetSession(session_id
);
574 if (!session
->recognizer
.get() || !session
->recognizer
->IsActive())
575 return SESSION_STATE_IDLE
;
576 if (session
->recognizer
->IsCapturingAudio())
577 return SESSION_STATE_CAPTURING_AUDIO
;
578 return SESSION_STATE_WAITING_FOR_RESULT
;
581 // ----------- Contract for all the FSM evolution functions below -------------
582 // - Are guaranteed to be executed in the IO thread;
583 // - Are guaranteed to be not reentrant (themselves and each other);
585 void SpeechRecognitionManagerImpl::SessionStart(const Session
& session
) {
586 DCHECK_EQ(primary_session_id_
, session
.id
);
587 const MediaStreamDevices
& devices
= session
.context
.devices
;
588 std::string device_id
;
589 if (devices
.empty()) {
590 // From the ask_user=false path, use the default device.
591 // TODO(xians): Abort the session after we do not need to support this path
593 device_id
= media::AudioManagerBase::kDefaultDeviceId
;
595 // From the ask_user=true path, use the selected device.
596 DCHECK_EQ(1u, devices
.size());
597 DCHECK_EQ(MEDIA_DEVICE_AUDIO_CAPTURE
, devices
.front().type
);
598 device_id
= devices
.front().id
;
601 session
.recognizer
->StartRecognition(device_id
);
604 void SpeechRecognitionManagerImpl::SessionAbort(const Session
& session
) {
605 if (primary_session_id_
== session
.id
)
606 primary_session_id_
= kSessionIDInvalid
;
607 DCHECK(session
.recognizer
.get());
608 session
.recognizer
->AbortRecognition();
611 void SpeechRecognitionManagerImpl::SessionStopAudioCapture(
612 const Session
& session
) {
613 DCHECK(session
.recognizer
.get());
614 session
.recognizer
->StopAudioCapture();
617 void SpeechRecognitionManagerImpl::ResetCapturingSessionId(
618 const Session
& session
) {
619 DCHECK_EQ(primary_session_id_
, session
.id
);
620 primary_session_id_
= kSessionIDInvalid
;
623 void SpeechRecognitionManagerImpl::SessionDelete(Session
* session
) {
624 DCHECK(session
->recognizer
.get() == NULL
|| !session
->recognizer
->IsActive());
625 if (primary_session_id_
== session
->id
)
626 primary_session_id_
= kSessionIDInvalid
;
627 if (!session
->context
.label
.empty())
628 media_stream_manager_
->CancelRequest(session
->context
.label
);
629 sessions_
.erase(session
->id
);
633 void SpeechRecognitionManagerImpl::NotFeasible(const Session
& session
,
635 NOTREACHED() << "Unfeasible event " << event
636 << " in state " << GetSessionState(session
.id
)
637 << " for session " << session
.id
;
640 int SpeechRecognitionManagerImpl::GetNextSessionID() {
642 // Deal with wrapping of last_session_id_. (How civilized).
643 if (last_session_id_
<= 0)
644 last_session_id_
= 1;
645 return last_session_id_
;
648 bool SpeechRecognitionManagerImpl::SessionExists(int session_id
) const {
649 return sessions_
.find(session_id
) != sessions_
.end();
652 SpeechRecognitionManagerImpl::Session
*
653 SpeechRecognitionManagerImpl::GetSession(int session_id
) const {
654 DCHECK_CURRENTLY_ON(BrowserThread::IO
);
655 SessionsTable::const_iterator iter
= sessions_
.find(session_id
);
656 DCHECK(iter
!= sessions_
.end());
660 SpeechRecognitionEventListener
* SpeechRecognitionManagerImpl::GetListener(
661 int session_id
) const {
662 Session
* session
= GetSession(session_id
);
663 if (session
->listener_is_active
&& session
->config
.event_listener
)
664 return session
->config
.event_listener
.get();
668 SpeechRecognitionEventListener
*
669 SpeechRecognitionManagerImpl::GetDelegateListener() const {
670 return delegate_
.get() ? delegate_
->GetEventListener() : NULL
;
673 const SpeechRecognitionSessionConfig
&
674 SpeechRecognitionManagerImpl::GetSessionConfig(int session_id
) const {
675 return GetSession(session_id
)->config
;
678 bool SpeechRecognitionManagerImpl::HasAudioInputDevices() {
679 return audio_manager_
->HasAudioInputDevices();
682 base::string16
SpeechRecognitionManagerImpl::GetAudioInputDeviceModel() {
683 return audio_manager_
->GetAudioInputDeviceModel();
686 void SpeechRecognitionManagerImpl::ShowAudioInputSettings() {
687 // Since AudioManager::ShowAudioInputSettings can potentially launch external
688 // processes, do that in the FILE thread to not block the calling threads.
689 BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE
,
690 base::Bind(&ShowAudioInputSettingsOnFileThread
,
694 SpeechRecognitionManagerImpl::Session::Session()
695 : id(kSessionIDInvalid
),
696 abort_requested(false),
697 listener_is_active(true) {
700 SpeechRecognitionManagerImpl::Session::~Session() {
703 } // namespace content