// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/speech/speech_recognizer_impl.h"

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/time/time.h"
#include "content/browser/browser_main_loop.h"
#include "content/browser/media/media_internals.h"
#include "content/browser/speech/audio_buffer.h"
#include "content/browser/speech/google_one_shot_remote_engine.h"
#include "content/public/browser/speech_recognition_event_listener.h"
#include "media/base/audio_converter.h"

#if defined(OS_WIN)
#include "media/audio/win/core_audio_util_win.h"
#endif
using media::AudioBus;
using media::AudioConverter;
using media::AudioInputController;
using media::AudioManager;
using media::AudioParameters;
using media::ChannelLayout;

namespace content {
// Private class which encapsulates the audio converter and the
// AudioConverter::InputCallback. It handles resampling, buffering and
// channel mixing between input and output parameters.
class SpeechRecognizerImpl::OnDataConverter
    : public media::AudioConverter::InputCallback {
 public:
  OnDataConverter(const AudioParameters& input_params,
                  const AudioParameters& output_params);
  ~OnDataConverter() override;

  // Converts input audio |data| bus into an AudioChunk where the input format
  // is given by |input_parameters_| and the output format by
  // |output_parameters_|.
  scoped_refptr<AudioChunk> Convert(const AudioBus* data);

  bool data_was_converted() const { return data_was_converted_; }

 private:
  // media::AudioConverter::InputCallback implementation.
  double ProvideInput(AudioBus* dest, base::TimeDelta buffer_delay) override;

  // Handles resampling, buffering, and channel mixing between input and
  // output parameters.
  AudioConverter audio_converter_;
  scoped_ptr<AudioBus> input_bus_;
  scoped_ptr<AudioBus> output_bus_;
  const AudioParameters input_parameters_;
  const AudioParameters output_parameters_;
  bool data_was_converted_;

  DISALLOW_COPY_AND_ASSIGN(OnDataConverter);
};

namespace {

// The following constants are related to the volume level indicator shown in
// the UI for recorded audio.
// Multiplier used when new volume is greater than previous level.
const float kUpSmoothingFactor = 1.0f;
// Multiplier used when new volume is less than previous level.
const float kDownSmoothingFactor = 0.7f;
// RMS dB value of a maximum (unclipped) sine wave for int16 samples.
const float kAudioMeterMaxDb = 90.31f;
// This value corresponds to RMS dB for int16 with 6 most-significant-bits = 0.
// Values lower than this will display as empty level-meter.
const float kAudioMeterMinDb = 30.0f;
const float kAudioMeterDbRange = kAudioMeterMaxDb - kAudioMeterMinDb;

// Maximum level to draw to display unclipped meter. (1.0f displays clipping.)
const float kAudioMeterRangeMaxUnclipped = 47.0f / 48.0f;

// Returns true if more than 5% of the samples are at min or max value.
bool DetectClipping(const AudioChunk& chunk) {
  const int num_samples = chunk.NumSamples();
  const int16* samples = chunk.SamplesData16();
  const int kThreshold = num_samples / 20;
  int clipping_samples = 0;

  for (int i = 0; i < num_samples; ++i) {
    if (samples[i] <= -32767 || samples[i] >= 32767) {
      if (++clipping_samples > kThreshold)
        return true;
    }
  }
  return false;
}

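// No-op callback whose only purpose is to keep a reference to the
// AudioInputController alive (via base::Bind) until its asynchronous Close()
// has completed; see the SpeechRecognizerImpl destructor below.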
void KeepAudioControllerRefcountedForDtor(scoped_refptr<AudioInputController>) {
}

}  // namespace

const int SpeechRecognizerImpl::kAudioSampleRate = 16000;
const ChannelLayout SpeechRecognizerImpl::kChannelLayout =
    media::CHANNEL_LAYOUT_MONO;
const int SpeechRecognizerImpl::kNumBitsPerAudioSample = 16;
const int SpeechRecognizerImpl::kNoSpeechTimeoutMs = 8000;
const int SpeechRecognizerImpl::kEndpointerEstimationTimeMs = 300;
media::AudioManager* SpeechRecognizerImpl::audio_manager_for_tests_ = NULL;

static_assert(SpeechRecognizerImpl::kNumBitsPerAudioSample % 8 == 0,
              "kNumBitsPerAudioSample must be a multiple of 8");

// SpeechRecognizerImpl::OnDataConverter implementation

SpeechRecognizerImpl::OnDataConverter::OnDataConverter(
    const AudioParameters& input_params,
    const AudioParameters& output_params)
    : audio_converter_(input_params, output_params, false),
      input_bus_(AudioBus::Create(input_params)),
      output_bus_(AudioBus::Create(output_params)),
      input_parameters_(input_params),
      output_parameters_(output_params),
      data_was_converted_(false) {
  audio_converter_.AddInput(this);
  audio_converter_.PrimeWithSilence();
}

SpeechRecognizerImpl::OnDataConverter::~OnDataConverter() {
  // It should now be safe to unregister the converter since no more OnData()
  // callbacks are outstanding at this point.
  audio_converter_.RemoveInput(this);
}

scoped_refptr<AudioChunk> SpeechRecognizerImpl::OnDataConverter::Convert(
    const AudioBus* data) {
  CHECK_EQ(data->frames(), input_parameters_.frames_per_buffer());
  data_was_converted_ = false;
  // Copy recorded audio to the |input_bus_| for later use in ProvideInput().
  data->CopyTo(input_bus_.get());
  // Convert the audio and place the result in |output_bus_|. This call will
  // result in a ProvideInput() callback where the actual input is provided.
  // However, it can happen that the converter contains enough cached data
  // to return a result without calling ProvideInput(). The caller of this
  // method should check the state of data_was_converted() and make an
  // additional call if it is still false on return.
  // See http://crbug.com/506051 for details.
  audio_converter_.Convert(output_bus_.get());
  // Create an audio chunk based on the converted result.
  scoped_refptr<AudioChunk> chunk(
      new AudioChunk(output_parameters_.GetBytesPerBuffer(),
                     output_parameters_.bits_per_sample() / 8));
  output_bus_->ToInterleaved(output_bus_->frames(),
                             output_parameters_.bits_per_sample() / 8,
                             chunk->writable_data());
  return chunk;
}

double SpeechRecognizerImpl::OnDataConverter::ProvideInput(
    AudioBus* dest, base::TimeDelta buffer_delay) {
  // Read from the input bus to feed the converter.
  input_bus_->CopyTo(dest);
  // Indicate that the recorded audio has in fact been used by the converter.
  data_was_converted_ = true;
  return 1.0;
}

// SpeechRecognizerImpl implementation

SpeechRecognizerImpl::SpeechRecognizerImpl(
    SpeechRecognitionEventListener* listener,
    int session_id,
    bool continuous,
    bool provisional_results,
    SpeechRecognitionEngine* engine)
    : SpeechRecognizer(listener, session_id),
      recognition_engine_(engine),
      endpointer_(kAudioSampleRate),
      audio_log_(MediaInternals::GetInstance()->CreateAudioLog(
          media::AudioLogFactory::AUDIO_INPUT_CONTROLLER)),
      is_dispatching_event_(false),
      provisional_results_(provisional_results),
      state_(STATE_IDLE) {
  DCHECK(recognition_engine_ != NULL);
  if (!continuous) {
    // In single shot (non-continuous) recognition,
    // the session is automatically ended after:
    //  - 0.5 seconds of silence if time <  3 seconds
    //  - 1   second  of silence if time >= 3 seconds
    endpointer_.set_speech_input_complete_silence_length(
        base::Time::kMicrosecondsPerSecond / 2);
    endpointer_.set_long_speech_input_complete_silence_length(
        base::Time::kMicrosecondsPerSecond);
    endpointer_.set_long_speech_length(3 * base::Time::kMicrosecondsPerSecond);
  } else {
    // In continuous recognition, the session is automatically ended after 15
    // seconds of silence.
    const int64 cont_timeout_us = base::Time::kMicrosecondsPerSecond * 15;
    endpointer_.set_speech_input_complete_silence_length(cont_timeout_us);
    endpointer_.set_long_speech_length(0);  // Use only a single timeout.
  }
  endpointer_.StartSession();
  recognition_engine_->set_delegate(this);
}

// -------  Methods that trigger Finite State Machine (FSM) events ------------

// NOTE: all the external events and requests should be enqueued (PostTask),
// even if they come from the same (IO) thread, in order to preserve the
// relationship of causality between events and avoid interleaved event
// processing due to synchronous callbacks.

void SpeechRecognizerImpl::StartRecognition(const std::string& device_id) {
  DCHECK(!device_id.empty());
  device_id_ = device_id;

  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, FSMEventArgs(EVENT_START)));
}

void SpeechRecognizerImpl::AbortRecognition() {
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, FSMEventArgs(EVENT_ABORT)));
}

void SpeechRecognizerImpl::StopAudioCapture() {
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, FSMEventArgs(EVENT_STOP_CAPTURE)));
}

bool SpeechRecognizerImpl::IsActive() const {
  // Checking the FSM state from another thread (thus, while the FSM is
  // potentially concurrently evolving) is meaningless.
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  return state_ != STATE_IDLE && state_ != STATE_ENDED;
}

bool SpeechRecognizerImpl::IsCapturingAudio() const {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);  // See IsActive().
  const bool is_capturing_audio = state_ >= STATE_STARTING &&
                                  state_ <= STATE_RECOGNIZING;
  DCHECK((is_capturing_audio && (audio_controller_.get() != NULL)) ||
         (!is_capturing_audio && audio_controller_.get() == NULL));
  return is_capturing_audio;
}

const SpeechRecognitionEngine&
SpeechRecognizerImpl::recognition_engine() const {
  return *(recognition_engine_.get());
}

SpeechRecognizerImpl::~SpeechRecognizerImpl() {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  endpointer_.EndSession();
  if (audio_controller_.get()) {
    audio_controller_->Close(
        base::Bind(&KeepAudioControllerRefcountedForDtor, audio_controller_));
    audio_log_->OnClosed(0);
  }
}

// Invoked in the audio thread.
void SpeechRecognizerImpl::OnError(AudioInputController* controller,
    media::AudioInputController::ErrorCode error_code) {
  FSMEventArgs event_args(EVENT_AUDIO_ERROR);
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

void SpeechRecognizerImpl::OnData(AudioInputController* controller,
                                  const AudioBus* data) {
  // Convert audio from native format to fixed format used by WebSpeech.
  FSMEventArgs event_args(EVENT_AUDIO_DATA);
  event_args.audio_data = audio_converter_->Convert(data);
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
  // See http://crbug.com/506051 regarding why one extra convert call can
  // sometimes be required. It should be a rare case.
  if (!audio_converter_->data_was_converted()) {
    event_args.audio_data = audio_converter_->Convert(data);
    BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                            base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                       this, event_args));
  }

  // Something is seriously wrong here and we are most likely missing some
  // audio segments.
  CHECK(audio_converter_->data_was_converted());
}

void SpeechRecognizerImpl::OnAudioClosed(AudioInputController*) {}

void SpeechRecognizerImpl::OnSpeechRecognitionEngineResults(
    const SpeechRecognitionResults& results) {
  FSMEventArgs event_args(EVENT_ENGINE_RESULT);
  event_args.engine_results = results;
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

void SpeechRecognizerImpl::OnSpeechRecognitionEngineError(
    const SpeechRecognitionError& error) {
  FSMEventArgs event_args(EVENT_ENGINE_ERROR);
  event_args.engine_error = error;
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

// ----------------------- Core FSM implementation ---------------------------
// TODO(primiano): After the changes in the media package (r129173), this class
// slightly violates the SpeechRecognitionEventListener interface contract. In
// particular, it is not true anymore that this class can be freed after the
// OnRecognitionEnd event, since the asynchronous audio_controller_.Close()
// call can still be in progress after the end event. Currently, this is not a
// problem for the browser itself, since refcounting protects us against such
// race conditions. However, we should fix this in the next CLs. For instance,
// tests are currently working just because the TestAudioInputController is
// not closing asynchronously as the real controller does, but they will
// become flaky if TestAudioInputController is fixed.

void SpeechRecognizerImpl::DispatchEvent(const FSMEventArgs& event_args) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  DCHECK_LE(event_args.event, EVENT_MAX_VALUE);
  DCHECK_LE(state_, STATE_MAX_VALUE);

  // Event dispatching must be sequential, otherwise it will break all the
  // rules and the assumptions of the finite state automata model.
  DCHECK(!is_dispatching_event_);
  is_dispatching_event_ = true;

  // Guard against the delegate freeing us until we finish processing the
  // event.
  scoped_refptr<SpeechRecognizerImpl> me(this);

  if (event_args.event == EVENT_AUDIO_DATA) {
    DCHECK(event_args.audio_data.get() != NULL);
    ProcessAudioPipeline(*event_args.audio_data.get());
  }

  // The audio pipeline must be processed before the event dispatch, otherwise
  // it would take actions according to the future state instead of the
  // current one.
  state_ = ExecuteTransitionAndGetNextState(event_args);
  is_dispatching_event_ = false;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::ExecuteTransitionAndGetNextState(
    const FSMEventArgs& event_args) {
  const FSMEvent event = event_args.event;
  switch (state_) {
    case STATE_IDLE:
      switch (event) {
        // TODO(primiano): restore UNREACHABLE_CONDITION on EVENT_ABORT and
        // EVENT_STOP_CAPTURE below once speech input extensions are fixed.
        case EVENT_ABORT:
          return AbortSilently(event_args);
        case EVENT_START:
          return StartRecording(event_args);
        case EVENT_STOP_CAPTURE:
          return AbortSilently(event_args);
        case EVENT_AUDIO_DATA:     // Corner cases related to queued messages
        case EVENT_ENGINE_RESULT:  // being lately dispatched.
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return DoNothing(event_args);
      }
      break;
    case STATE_STARTING:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return AbortSilently(event_args);
        case EVENT_AUDIO_DATA:
          return StartRecognitionEngine(event_args);
        case EVENT_ENGINE_RESULT:
          return NotFeasible(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_ESTIMATING_ENVIRONMENT:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return StopCaptureAndWaitForResult(event_args);
        case EVENT_AUDIO_DATA:
          return WaitEnvironmentEstimationCompletion(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessIntermediateResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_WAITING_FOR_SPEECH:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return StopCaptureAndWaitForResult(event_args);
        case EVENT_AUDIO_DATA:
          return DetectUserSpeechOrTimeout(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessIntermediateResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_RECOGNIZING:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return StopCaptureAndWaitForResult(event_args);
        case EVENT_AUDIO_DATA:
          return DetectEndOfSpeech(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessIntermediateResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_WAITING_FINAL_RESULT:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
        case EVENT_AUDIO_DATA:
          return DoNothing(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessFinalResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;

    // TODO(primiano): remove this state when speech input extensions support
    // is removed and STATE_IDLE.EVENT_ABORT, EVENT_STOP_CAPTURE are reset to
    // NotFeasible (see the TODO above).
    case STATE_ENDED:
      return DoNothing(event_args);
  }
  return NotFeasible(event_args);
}

// ----------- Contract for all the FSM evolution functions below -------------
//  - Are guaranteed to be executed in the IO thread;
//  - Are guaranteed to be not reentrant (themselves and each other);
//  - event_args members are guaranteed to be stable during the call;
//  - The class won't be freed in the meanwhile due to callbacks;
//  - IsCapturingAudio() returns true if and only if audio_controller_ != NULL.

// TODO(primiano): the audio pipeline is currently serial. However, the
// clipper->endpointer->vumeter chain and the sr_engine could be parallelized.
// We should profile the execution to see whether it would be worthwhile.
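// Routes a captured audio chunk through the processing pipeline: the sample
// counter is always updated, the endpointer and the recognition engine are
// fed once environment estimation has started, and the VU meter is updated
// only while waiting for or receiving speech.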
void SpeechRecognizerImpl::ProcessAudioPipeline(const AudioChunk& raw_audio) {
  const bool route_to_endpointer = state_ >= STATE_ESTIMATING_ENVIRONMENT &&
                                   state_ <= STATE_RECOGNIZING;
  const bool route_to_sr_engine = route_to_endpointer;
  const bool route_to_vumeter = state_ >= STATE_WAITING_FOR_SPEECH &&
                                state_ <= STATE_RECOGNIZING;
  const bool clip_detected = DetectClipping(raw_audio);
  float rms = 0.0f;

  num_samples_recorded_ += raw_audio.NumSamples();

  if (route_to_endpointer)
    endpointer_.ProcessAudio(raw_audio, &rms);

  if (route_to_vumeter) {
    DCHECK(route_to_endpointer);  // Depends on endpointer due to |rms|.
    UpdateSignalAndNoiseLevels(rms, clip_detected);
  }
  if (route_to_sr_engine) {
    DCHECK(recognition_engine_.get() != NULL);
    recognition_engine_->TakeAudioChunk(raw_audio);
  }
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StartRecording(const FSMEventArgs&) {
  DCHECK(recognition_engine_.get() != NULL);
  DCHECK(!IsCapturingAudio());
  const bool unit_test_is_active = (audio_manager_for_tests_ != NULL);
  AudioManager* audio_manager = unit_test_is_active ?
                                audio_manager_for_tests_ :
                                BrowserMainLoop::GetAudioManager();
  DCHECK(audio_manager != NULL);

  DVLOG(1) << "SpeechRecognizerImpl starting audio capture.";
  num_samples_recorded_ = 0;
  listener()->OnRecognitionStart(session_id());

  // TODO(xians): Check if the OS has the device with |device_id_|, return
  // |SPEECH_AUDIO_ERROR_DETAILS_NO_MIC| if the target device does not exist.
  if (!audio_manager->HasAudioInputDevices()) {
    return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE,
                                        SPEECH_AUDIO_ERROR_DETAILS_NO_MIC));
  }

  int chunk_duration_ms = recognition_engine_->GetDesiredAudioChunkDurationMs();

  AudioParameters in_params = audio_manager->GetInputStreamParameters(
      device_id_);
  if (!in_params.IsValid() && !unit_test_is_active) {
    DLOG(ERROR) << "Invalid native audio input parameters";
    return Abort(
        SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE));
  }

  // Audio converter shall provide audio based on these parameters as output.
  // Hard coded, WebSpeech specific parameters are utilized here.
  int frames_per_buffer = (kAudioSampleRate * chunk_duration_ms) / 1000;
  AudioParameters output_parameters = AudioParameters(
      AudioParameters::AUDIO_PCM_LOW_LATENCY, kChannelLayout, kAudioSampleRate,
      kNumBitsPerAudioSample, frames_per_buffer);
  DVLOG(1) << "SRI::output_parameters: "
           << output_parameters.AsHumanReadableString();

  // Audio converter will receive audio based on these parameters as input.
  // On Windows we start by verifying that Core Audio is supported. If not,
  // the WaveIn API is used and we might as well avoid all audio conversions
  // since WaveIn does the conversion for us.
  // TODO(henrika): this code should be moved to platform dependent audio
  // managers.
  bool use_native_audio_params = true;
#if defined(OS_WIN)
  use_native_audio_params = media::CoreAudioUtil::IsSupported();
  DVLOG_IF(1, !use_native_audio_params) << "Reverting to WaveIn for WebSpeech";
#endif

  AudioParameters input_parameters = output_parameters;
  if (use_native_audio_params && !unit_test_is_active) {
    // Use native audio parameters but avoid opening up at the native buffer
    // size. Instead use same frame size (in milliseconds) as WebSpeech uses.
    // We rely on internal buffers in the audio back-end to fulfill this request
    // and the idea is to simplify the audio conversion since each Convert()
    // call will then render exactly one ProvideInput() call.
    input_parameters = in_params;
    frames_per_buffer =
        ((in_params.sample_rate() * chunk_duration_ms) / 1000.0) + 0.5;
    input_parameters.set_frames_per_buffer(frames_per_buffer);
    DVLOG(1) << "SRI::input_parameters: "
             << input_parameters.AsHumanReadableString();
  }

  // Create an audio converter which converts data between native input format
  // and WebSpeech specific output format.
  audio_converter_.reset(
      new OnDataConverter(input_parameters, output_parameters));

  audio_controller_ = AudioInputController::Create(
      audio_manager, this, input_parameters, device_id_, NULL);

  if (!audio_controller_.get()) {
    return Abort(
        SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE));
  }

  audio_log_->OnCreated(0, input_parameters, device_id_);

  // The endpointer needs to estimate the environment/background noise before
  // starting to treat the audio as user input. We wait in the state
  // ESTIMATING_ENVIRONMENT until such interval has elapsed before switching
  // to user input mode.
  endpointer_.SetEnvironmentEstimationMode();
  audio_controller_->Record();
  audio_log_->OnStarted(0);
  return STATE_STARTING;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StartRecognitionEngine(const FSMEventArgs& event_args) {
  // This is the first audio packet captured, so the recognition engine is
  // started and the delegate notified about the event.
  DCHECK(recognition_engine_.get() != NULL);
  recognition_engine_->StartRecognition();
  listener()->OnAudioStart(session_id());

  // This is a little hack, since TakeAudioChunk() is already called by
  // ProcessAudioPipeline(). It is the best tradeoff, unless we allow dropping
  // the first audio chunk captured after opening the audio device.
  recognition_engine_->TakeAudioChunk(*(event_args.audio_data.get()));
  return STATE_ESTIMATING_ENVIRONMENT;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::WaitEnvironmentEstimationCompletion(const FSMEventArgs&) {
  DCHECK(endpointer_.IsEstimatingEnvironment());
  if (GetElapsedTimeMs() >= kEndpointerEstimationTimeMs) {
    endpointer_.SetUserInputMode();
    listener()->OnEnvironmentEstimationComplete(session_id());
    return STATE_WAITING_FOR_SPEECH;
  } else {
    return STATE_ESTIMATING_ENVIRONMENT;
  }
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DetectUserSpeechOrTimeout(const FSMEventArgs&) {
  if (endpointer_.DidStartReceivingSpeech()) {
    listener()->OnSoundStart(session_id());
    return STATE_RECOGNIZING;
  } else if (GetElapsedTimeMs() >= kNoSpeechTimeoutMs) {
    return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NO_SPEECH));
  }
  return STATE_WAITING_FOR_SPEECH;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DetectEndOfSpeech(const FSMEventArgs& event_args) {
  if (endpointer_.speech_input_complete())
    return StopCaptureAndWaitForResult(event_args);
  return STATE_RECOGNIZING;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StopCaptureAndWaitForResult(const FSMEventArgs&) {
  DCHECK(state_ >= STATE_ESTIMATING_ENVIRONMENT && state_ <= STATE_RECOGNIZING);

  DVLOG(1) << "Concluding recognition";
  CloseAudioControllerAsynchronously();
  recognition_engine_->AudioChunksEnded();

  if (state_ > STATE_WAITING_FOR_SPEECH)
    listener()->OnSoundEnd(session_id());

  listener()->OnAudioEnd(session_id());
  return STATE_WAITING_FINAL_RESULT;
}

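// Ends the recognition without reporting an error to the listener: the
// SPEECH_RECOGNITION_ERROR_NONE code makes Abort() skip OnRecognitionError().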
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::AbortSilently(const FSMEventArgs& event_args) {
  DCHECK_NE(event_args.event, EVENT_AUDIO_ERROR);
  DCHECK_NE(event_args.event, EVENT_ENGINE_ERROR);
  return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NONE));
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::AbortWithError(const FSMEventArgs& event_args) {
  if (event_args.event == EVENT_AUDIO_ERROR) {
    return Abort(
        SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE));
  } else if (event_args.event == EVENT_ENGINE_ERROR) {
    return Abort(event_args.engine_error);
  }
  return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_ABORTED));
}

SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::Abort(
    const SpeechRecognitionError& error) {
  if (IsCapturingAudio())
    CloseAudioControllerAsynchronously();

  DVLOG(1) << "SpeechRecognizerImpl canceling recognition.";

  // The recognition engine is initialized only after STATE_STARTING.
  if (state_ > STATE_STARTING) {
    DCHECK(recognition_engine_.get() != NULL);
    recognition_engine_->EndRecognition();
  }

  if (state_ > STATE_WAITING_FOR_SPEECH && state_ < STATE_WAITING_FINAL_RESULT)
    listener()->OnSoundEnd(session_id());

  if (state_ > STATE_STARTING && state_ < STATE_WAITING_FINAL_RESULT)
    listener()->OnAudioEnd(session_id());

  if (error.code != SPEECH_RECOGNITION_ERROR_NONE)
    listener()->OnRecognitionError(session_id(), error);

  listener()->OnRecognitionEnd(session_id());

  return STATE_ENDED;
}

SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::ProcessIntermediateResult(
    const FSMEventArgs& event_args) {
  // Provisional results can occur only if explicitly enabled in the JS API.
  DCHECK(provisional_results_);

  // In continuous recognition, intermediate results can occur even when we are
  // in the ESTIMATING_ENVIRONMENT or WAITING_FOR_SPEECH states (if the
  // recognition engine is "faster" than our endpointer). In these cases we
  // skip the endpointer and fast-forward to the RECOGNIZING state, while
  // preserving the order in which events are triggered.
  if (state_ == STATE_ESTIMATING_ENVIRONMENT) {
    DCHECK(endpointer_.IsEstimatingEnvironment());
    endpointer_.SetUserInputMode();
    listener()->OnEnvironmentEstimationComplete(session_id());
  } else if (state_ == STATE_WAITING_FOR_SPEECH) {
    listener()->OnSoundStart(session_id());
  } else {
    DCHECK_EQ(STATE_RECOGNIZING, state_);
  }

  listener()->OnRecognitionResults(session_id(), event_args.engine_results);
  return STATE_RECOGNIZING;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::ProcessFinalResult(const FSMEventArgs& event_args) {
  const SpeechRecognitionResults& results = event_args.engine_results;
  SpeechRecognitionResults::const_iterator i = results.begin();
  bool provisional_results_pending = false;
  bool results_are_empty = true;
  for (; i != results.end(); ++i) {
    const SpeechRecognitionResult& result = *i;
    if (result.is_provisional) {
      DCHECK(provisional_results_);
      provisional_results_pending = true;
    } else if (results_are_empty) {
      results_are_empty = result.hypotheses.empty();
    }
  }

  if (provisional_results_pending) {
    listener()->OnRecognitionResults(session_id(), results);
    // We don't end the recognition if a provisional result is received in
    // STATE_WAITING_FINAL_RESULT. A definitive result will come next and will
    // end the recognition.
    return state_;
  }

  recognition_engine_->EndRecognition();

  if (!results_are_empty) {
    // We could receive an empty result (which we won't propagate further)
    // in the following (continuous) scenario:
    //  1. The caller starts pushing audio and receives some results;
    //  2. A |StopAudioCapture| is issued later;
    //  3. The final audio frames captured in the interval ]1,2] do not lead to
    //     any result (nor any error);
    //  4. The speech recognition engine, therefore, emits an empty result to
    //     notify that the recognition is ended with no error, yet without any
    //     further result.
    listener()->OnRecognitionResults(session_id(), results);
  }

  listener()->OnRecognitionEnd(session_id());
  return STATE_ENDED;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DoNothing(const FSMEventArgs&) const {
  return state_;  // Just keep the current state.
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::NotFeasible(const FSMEventArgs& event_args) {
  NOTREACHED() << "Unfeasible event " << event_args.event
               << " in state " << state_;
  return state_;
}

void SpeechRecognizerImpl::CloseAudioControllerAsynchronously() {
  DCHECK(IsCapturingAudio());
  DVLOG(1) << "SpeechRecognizerImpl closing audio controller.";
  // Issues a Close on the audio controller, passing an empty callback. The
  // only purpose of such callback is to keep the audio controller refcounted
  // until Close has completed (in the audio thread) and automatically destroy
  // it afterwards (upon return from OnAudioClosed).
  audio_controller_->Close(base::Bind(&SpeechRecognizerImpl::OnAudioClosed,
                                      this, audio_controller_));
  audio_controller_ = NULL;  // The controller is still refcounted by Bind.
  audio_log_->OnClosed(0);
}

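// Returns the time elapsed since audio capture started, derived from the
// number of samples recorded so far at the fixed kAudioSampleRate (i.e. it
// measures captured audio time, not wall-clock time).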
int SpeechRecognizerImpl::GetElapsedTimeMs() const {
  return (num_samples_recorded_ * 1000) / kAudioSampleRate;
}

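// Maps the current RMS and noise dB readings onto the range used by the UI
// level meter: values are normalized against the
// [kAudioMeterMinDb, kAudioMeterMaxDb] window, clamped to
// kAudioMeterRangeMaxUnclipped, and the signal level is smoothed with a faster
// attack (kUpSmoothingFactor) than decay (kDownSmoothingFactor).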
void SpeechRecognizerImpl::UpdateSignalAndNoiseLevels(const float& rms,
                                                      bool clip_detected) {
  // Calculate the input volume to display in the UI, smoothing towards the
  // new level.
  // TODO(primiano): Do we really need all this floating point arithmetic here?
  // Perhaps it might be quite expensive on mobile.
  float level = (rms - kAudioMeterMinDb) /
                (kAudioMeterDbRange / kAudioMeterRangeMaxUnclipped);
  level = std::min(std::max(0.0f, level), kAudioMeterRangeMaxUnclipped);
  const float smoothing_factor = (level > audio_level_) ? kUpSmoothingFactor :
                                                          kDownSmoothingFactor;
  audio_level_ += (level - audio_level_) * smoothing_factor;

  float noise_level = (endpointer_.NoiseLevelDb() - kAudioMeterMinDb) /
                      (kAudioMeterDbRange / kAudioMeterRangeMaxUnclipped);
  noise_level = std::min(std::max(0.0f, noise_level),
                         kAudioMeterRangeMaxUnclipped);

  listener()->OnAudioLevelsChange(
      session_id(), clip_detected ? 1.0f : audio_level_, noise_level);
}

void SpeechRecognizerImpl::SetAudioManagerForTesting(
    AudioManager* audio_manager) {
  audio_manager_for_tests_ = audio_manager;
}

SpeechRecognizerImpl::FSMEventArgs::FSMEventArgs(FSMEvent event_value)
    : event(event_value),
      engine_error(SPEECH_RECOGNITION_ERROR_NONE) {
}

SpeechRecognizerImpl::FSMEventArgs::~FSMEventArgs() {
}

}  // namespace content