// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/speech/speech_recognizer_impl.h"

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/time/time.h"
#include "content/browser/browser_main_loop.h"
#include "content/browser/media/media_internals.h"
#include "content/browser/speech/audio_buffer.h"
#include "content/browser/speech/google_one_shot_remote_engine.h"
#include "content/public/browser/speech_recognition_event_listener.h"
#include "media/base/audio_converter.h"

#if defined(OS_WIN)
#include "media/audio/win/core_audio_util_win.h"
#endif

using media::AudioBus;
using media::AudioConverter;
using media::AudioInputController;
using media::AudioManager;
using media::AudioParameters;
using media::ChannelLayout;

namespace content {

// Private class which encapsulates the audio converter and the
// AudioConverter::InputCallback. It handles resampling, buffering and
// channel mixing between input and output parameters.
class SpeechRecognizerImpl::OnDataConverter
    : public media::AudioConverter::InputCallback {
 public:
  OnDataConverter(const AudioParameters& input_params,
                  const AudioParameters& output_params);
  ~OnDataConverter() override;

  // Converts input audio |data| bus into an AudioChunk where the input format
  // is given by |input_parameters_| and the output format by
  // |output_parameters_|.
  scoped_refptr<AudioChunk> Convert(const AudioBus* data);

 private:
  // media::AudioConverter::InputCallback implementation.
  double ProvideInput(AudioBus* dest, base::TimeDelta buffer_delay) override;

  // Handles resampling, buffering, and channel mixing between input and output
  // parameters.
  AudioConverter audio_converter_;

  scoped_ptr<AudioBus> input_bus_;
  scoped_ptr<AudioBus> output_bus_;
  const AudioParameters input_parameters_;
  const AudioParameters output_parameters_;
  bool waiting_for_input_;

  DISALLOW_COPY_AND_ASSIGN(OnDataConverter);
};

namespace {

// The following constants are related to the volume level indicator shown in
// the UI for recorded audio.
// Multiplier used when the new volume is greater than the previous level.
const float kUpSmoothingFactor = 1.0f;
// Multiplier used when the new volume is less than the previous level.
const float kDownSmoothingFactor = 0.7f;
// RMS dB value of a maximum (unclipped) sine wave for int16 samples.
const float kAudioMeterMaxDb = 90.31f;
// This value corresponds to RMS dB for int16 with 6 most-significant-bits = 0.
// Values lower than this will display as an empty level-meter.
const float kAudioMeterMinDb = 30.0f;
const float kAudioMeterDbRange = kAudioMeterMaxDb - kAudioMeterMinDb;

// Maximum level to draw to display unclipped meter. (1.0f displays clipping.)
const float kAudioMeterRangeMaxUnclipped = 47.0f / 48.0f;

// Returns true if more than 5% of the samples are at min or max value.
bool DetectClipping(const AudioChunk& chunk) {
  const int num_samples = chunk.NumSamples();
  const int16* samples = chunk.SamplesData16();
  const int kThreshold = num_samples / 20;
  int clipping_samples = 0;

  for (int i = 0; i < num_samples; ++i) {
    if (samples[i] <= -32767 || samples[i] >= 32767) {
      if (++clipping_samples > kThreshold)
        return true;
    }
  }
  return false;
}

void KeepAudioControllerRefcountedForDtor(scoped_refptr<AudioInputController>) {
}

}  // namespace

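// Audio format fed to the endpointer and the recognition engine: mono,
// 16 kHz, 16-bit samples. Whatever the native capture format is, OnData()
// converts it into this format (see OnDataConverter and StartRecording()).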
const int SpeechRecognizerImpl::kAudioSampleRate = 16000;
const ChannelLayout SpeechRecognizerImpl::kChannelLayout =
    media::CHANNEL_LAYOUT_MONO;
const int SpeechRecognizerImpl::kNumBitsPerAudioSample = 16;
const int SpeechRecognizerImpl::kNoSpeechTimeoutMs = 8000;
const int SpeechRecognizerImpl::kEndpointerEstimationTimeMs = 300;
media::AudioManager* SpeechRecognizerImpl::audio_manager_for_tests_ = NULL;

static_assert(SpeechRecognizerImpl::kNumBitsPerAudioSample % 8 == 0,
              "kNumBitsPerAudioSample must be a multiple of 8");

// SpeechRecognizerImpl::OnDataConverter implementation

SpeechRecognizerImpl::OnDataConverter::OnDataConverter(
    const AudioParameters& input_params,
    const AudioParameters& output_params)
    : audio_converter_(input_params, output_params, false),
      input_bus_(AudioBus::Create(input_params)),
      output_bus_(AudioBus::Create(output_params)),
      input_parameters_(input_params),
      output_parameters_(output_params),
      waiting_for_input_(false) {
  audio_converter_.AddInput(this);
}

SpeechRecognizerImpl::OnDataConverter::~OnDataConverter() {
  // It should now be safe to unregister the converter since no more OnData()
  // callbacks are outstanding at this point.
  audio_converter_.RemoveInput(this);
}

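// Converts one captured input bus into a single output AudioChunk. Buffer
// sizes are chosen in StartRecording() so that each Convert() call consumes
// exactly one input bus through a single ProvideInput() callback.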
scoped_refptr<AudioChunk> SpeechRecognizerImpl::OnDataConverter::Convert(
    const AudioBus* data) {
  CHECK_EQ(data->frames(), input_parameters_.frames_per_buffer());

  data->CopyTo(input_bus_.get());

  waiting_for_input_ = true;
  audio_converter_.Convert(output_bus_.get());

  scoped_refptr<AudioChunk> chunk(
      new AudioChunk(output_parameters_.GetBytesPerBuffer(),
                     output_parameters_.bits_per_sample() / 8));
  output_bus_->ToInterleaved(output_bus_->frames(),
                             output_parameters_.bits_per_sample() / 8,
                             chunk->writable_data());
  return chunk;
}

double SpeechRecognizerImpl::OnDataConverter::ProvideInput(
    AudioBus* dest, base::TimeDelta buffer_delay) {
  // The audio converter should never ask for more than one bus in each call
  // to Convert(). If it does, we have a serious issue in our design, since we
  // might miss recorded chunks of 100 ms audio data.
  CHECK(waiting_for_input_);

  // Read from the input bus to feed the converter.
  input_bus_->CopyTo(dest);

  // |input_bus_| should only be provided once.
  waiting_for_input_ = false;
  return 1;
}

// SpeechRecognizerImpl implementation

SpeechRecognizerImpl::SpeechRecognizerImpl(
    SpeechRecognitionEventListener* listener,
    int session_id,
    bool continuous,
    bool provisional_results,
    SpeechRecognitionEngine* engine)
    : SpeechRecognizer(listener, session_id),
      recognition_engine_(engine),
      endpointer_(kAudioSampleRate),
      audio_log_(MediaInternals::GetInstance()->CreateAudioLog(
          media::AudioLogFactory::AUDIO_INPUT_CONTROLLER)),
      is_dispatching_event_(false),
      provisional_results_(provisional_results),
      state_(STATE_IDLE) {
  DCHECK(recognition_engine_ != NULL);
  if (!continuous) {
    // In single shot (non-continuous) recognition, the session is
    // automatically ended after:
    //  - 0.5 seconds of silence if time < 3 seconds;
    //  - 1 second of silence if time >= 3 seconds.
    endpointer_.set_speech_input_complete_silence_length(
        base::Time::kMicrosecondsPerSecond / 2);
    endpointer_.set_long_speech_input_complete_silence_length(
        base::Time::kMicrosecondsPerSecond);
    endpointer_.set_long_speech_length(3 * base::Time::kMicrosecondsPerSecond);
  } else {
    // In continuous recognition, the session is automatically ended after 15
    // seconds of silence.
    const int64 cont_timeout_us = base::Time::kMicrosecondsPerSecond * 15;
    endpointer_.set_speech_input_complete_silence_length(cont_timeout_us);
    endpointer_.set_long_speech_length(0);  // Use only a single timeout.
  }
  endpointer_.StartSession();
  recognition_engine_->set_delegate(this);
}

// -------  Methods that trigger Finite State Machine (FSM) events ------------

// NOTE: all the external events and requests should be enqueued (PostTask),
// even if they come from the same (IO) thread, in order to preserve the
// relationship of causality between events and avoid interleaved event
// processing due to synchronous callbacks.

void SpeechRecognizerImpl::StartRecognition(const std::string& device_id) {
  DCHECK(!device_id.empty());
  device_id_ = device_id;

  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, FSMEventArgs(EVENT_START)));
}

void SpeechRecognizerImpl::AbortRecognition() {
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, FSMEventArgs(EVENT_ABORT)));
}

void SpeechRecognizerImpl::StopAudioCapture() {
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, FSMEventArgs(EVENT_STOP_CAPTURE)));
}

bool SpeechRecognizerImpl::IsActive() const {
  // Checking the FSM state from another thread (thus, while the FSM is
  // potentially concurrently evolving) is meaningless.
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  return state_ != STATE_IDLE && state_ != STATE_ENDED;
}

bool SpeechRecognizerImpl::IsCapturingAudio() const {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);  // See IsActive().
  const bool is_capturing_audio = state_ >= STATE_STARTING &&
                                  state_ <= STATE_RECOGNIZING;
  DCHECK((is_capturing_audio && (audio_controller_.get() != NULL)) ||
         (!is_capturing_audio && audio_controller_.get() == NULL));
  return is_capturing_audio;
}

const SpeechRecognitionEngine&
SpeechRecognizerImpl::recognition_engine() const {
  return *(recognition_engine_.get());
}

SpeechRecognizerImpl::~SpeechRecognizerImpl() {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  endpointer_.EndSession();
  if (audio_controller_.get()) {
    audio_controller_->Close(
        base::Bind(&KeepAudioControllerRefcountedForDtor, audio_controller_));
    audio_log_->OnClosed(0);
  }
}

// Invoked in the audio thread.
void SpeechRecognizerImpl::OnError(AudioInputController* controller,
    media::AudioInputController::ErrorCode error_code) {
  FSMEventArgs event_args(EVENT_AUDIO_ERROR);
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

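// Invoked in the audio thread, like OnError() above; posts the converted
// audio to the IO thread as an EVENT_AUDIO_DATA event.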
void SpeechRecognizerImpl::OnData(AudioInputController* controller,
                                  const AudioBus* data) {
  // Convert audio from native format to fixed format used by WebSpeech.
  FSMEventArgs event_args(EVENT_AUDIO_DATA);
  event_args.audio_data = audio_converter_->Convert(data);

  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

void SpeechRecognizerImpl::OnAudioClosed(AudioInputController*) {}

void SpeechRecognizerImpl::OnSpeechRecognitionEngineResults(
    const SpeechRecognitionResults& results) {
  FSMEventArgs event_args(EVENT_ENGINE_RESULT);
  event_args.engine_results = results;
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

void SpeechRecognizerImpl::OnSpeechRecognitionEngineError(
    const SpeechRecognitionError& error) {
  FSMEventArgs event_args(EVENT_ENGINE_ERROR);
  event_args.engine_error = error;
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

// -----------------------  Core FSM implementation ---------------------------
// TODO(primiano): After the changes in the media package (r129173), this class
// slightly violates the SpeechRecognitionEventListener interface contract. In
// particular, it is no longer true that this class can be freed after the
// OnRecognitionEnd event, since the asynchronous audio_controller_.Close()
// call can still be in progress after the end event. Currently this is not a
// problem for the browser itself, since refcounting protects us against such
// race conditions. However, we should fix this in the next CLs. For instance,
// tests currently work only because TestAudioInputController does not close
// asynchronously as the real controller does, and they will become flaky once
// TestAudioInputController is fixed.

void SpeechRecognizerImpl::DispatchEvent(const FSMEventArgs& event_args) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  DCHECK_LE(event_args.event, EVENT_MAX_VALUE);
  DCHECK_LE(state_, STATE_MAX_VALUE);

  // Event dispatching must be sequential, otherwise it will break all the
  // rules and the assumptions of the finite state automata model.
  DCHECK(!is_dispatching_event_);
  is_dispatching_event_ = true;

  // Guard against the delegate freeing us until we finish processing the
  // event.
  scoped_refptr<SpeechRecognizerImpl> me(this);

  if (event_args.event == EVENT_AUDIO_DATA) {
    DCHECK(event_args.audio_data.get() != NULL);
    ProcessAudioPipeline(*event_args.audio_data.get());
  }

  // The audio pipeline must be processed before the event dispatch, otherwise
  // it would take actions according to the future state instead of the
  // current.
  state_ = ExecuteTransitionAndGetNextState(event_args);
  is_dispatching_event_ = false;
}

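// Typical flow of a successful recognition session through the FSM:
//   STATE_IDLE -> (EVENT_START) -> STATE_STARTING -> (first EVENT_AUDIO_DATA)
//   -> STATE_ESTIMATING_ENVIRONMENT -> STATE_WAITING_FOR_SPEECH ->
//   STATE_RECOGNIZING -> (end of speech or EVENT_STOP_CAPTURE) ->
//   STATE_WAITING_FINAL_RESULT -> (final EVENT_ENGINE_RESULT) -> STATE_ENDED.
// Errors and aborts are routed to the Abort*() handlers, which also terminate
// in STATE_ENDED.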
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::ExecuteTransitionAndGetNextState(
    const FSMEventArgs& event_args) {
  const FSMEvent event = event_args.event;
  switch (state_) {
    case STATE_IDLE:
      switch (event) {
        // TODO(primiano): restore UNREACHABLE_CONDITION on EVENT_ABORT and
        // EVENT_STOP_CAPTURE below once speech input extensions are fixed.
        case EVENT_ABORT:
          return AbortSilently(event_args);
        case EVENT_START:
          return StartRecording(event_args);
        case EVENT_STOP_CAPTURE:
          return AbortSilently(event_args);
        case EVENT_AUDIO_DATA:     // Corner cases related to queued messages
        case EVENT_ENGINE_RESULT:  // being dispatched late.
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return DoNothing(event_args);
      }
      break;
    case STATE_STARTING:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return AbortSilently(event_args);
        case EVENT_AUDIO_DATA:
          return StartRecognitionEngine(event_args);
        case EVENT_ENGINE_RESULT:
          return NotFeasible(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_ESTIMATING_ENVIRONMENT:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return StopCaptureAndWaitForResult(event_args);
        case EVENT_AUDIO_DATA:
          return WaitEnvironmentEstimationCompletion(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessIntermediateResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_WAITING_FOR_SPEECH:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return StopCaptureAndWaitForResult(event_args);
        case EVENT_AUDIO_DATA:
          return DetectUserSpeechOrTimeout(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessIntermediateResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_RECOGNIZING:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return StopCaptureAndWaitForResult(event_args);
        case EVENT_AUDIO_DATA:
          return DetectEndOfSpeech(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessIntermediateResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_WAITING_FINAL_RESULT:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
        case EVENT_AUDIO_DATA:
          return DoNothing(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessFinalResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;

    // TODO(primiano): remove this state once speech input extensions support
    // is removed and STATE_IDLE's EVENT_ABORT and EVENT_STOP_CAPTURE are
    // reset to NotFeasible (see the TODO above).
    case STATE_ENDED:
      return DoNothing(event_args);
  }
  return NotFeasible(event_args);
}

// ----------- Contract for all the FSM evolution functions below -------------
//  - Are guaranteed to be executed in the IO thread;
//  - Are guaranteed to be not reentrant (themselves and each other);
//  - event_args members are guaranteed to be stable during the call;
//  - The class won't be freed in the meanwhile due to callbacks;
//  - IsCapturingAudio() returns true if and only if audio_controller_ != NULL.

// TODO(primiano): the audio pipeline is currently serial. However, the
// clipper->endpointer->vumeter chain and the sr_engine could be parallelized.
// We should profile the execution to see whether it would be worth it.
void SpeechRecognizerImpl::ProcessAudioPipeline(const AudioChunk& raw_audio) {
  const bool route_to_endpointer = state_ >= STATE_ESTIMATING_ENVIRONMENT &&
                                   state_ <= STATE_RECOGNIZING;
  const bool route_to_sr_engine = route_to_endpointer;
  const bool route_to_vumeter = state_ >= STATE_WAITING_FOR_SPEECH &&
                                state_ <= STATE_RECOGNIZING;
  const bool clip_detected = DetectClipping(raw_audio);
  float rms = 0.0f;

  num_samples_recorded_ += raw_audio.NumSamples();

  if (route_to_endpointer)
    endpointer_.ProcessAudio(raw_audio, &rms);

  if (route_to_vumeter) {
    DCHECK(route_to_endpointer);  // Depends on endpointer due to |rms|.
    UpdateSignalAndNoiseLevels(rms, clip_detected);
  }
  if (route_to_sr_engine) {
    DCHECK(recognition_engine_.get() != NULL);
    recognition_engine_->TakeAudioChunk(raw_audio);
  }
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StartRecording(const FSMEventArgs&) {
  DCHECK(recognition_engine_.get() != NULL);
  DCHECK(!IsCapturingAudio());
  const bool unit_test_is_active = (audio_manager_for_tests_ != NULL);
  AudioManager* audio_manager = unit_test_is_active ?
                                audio_manager_for_tests_ :
                                AudioManager::Get();
  DCHECK(audio_manager != NULL);

  DVLOG(1) << "SpeechRecognizerImpl starting audio capture.";
  num_samples_recorded_ = 0;
  audio_level_ = 0;
  listener()->OnRecognitionStart(session_id());

  // TODO(xians): Check if the OS has the device with |device_id_|, return
  // |SPEECH_AUDIO_ERROR_DETAILS_NO_MIC| if the target device does not exist.
  if (!audio_manager->HasAudioInputDevices()) {
    return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE,
                                        SPEECH_AUDIO_ERROR_DETAILS_NO_MIC));
  }

  int chunk_duration_ms = recognition_engine_->GetDesiredAudioChunkDurationMs();

  AudioParameters in_params = audio_manager->GetInputStreamParameters(
      device_id_);
  if (!in_params.IsValid() && !unit_test_is_active) {
    DLOG(ERROR) << "Invalid native audio input parameters";
    return Abort(
        SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE));
  }

  // The audio converter shall provide audio based on these parameters as
  // output. Hard-coded, WebSpeech-specific parameters are utilized here.
  int frames_per_buffer = (kAudioSampleRate * chunk_duration_ms) / 1000;
  AudioParameters output_parameters = AudioParameters(
      AudioParameters::AUDIO_PCM_LOW_LATENCY, kChannelLayout, kAudioSampleRate,
      kNumBitsPerAudioSample, frames_per_buffer);

  // The audio converter will receive audio based on these parameters as input.
  // On Windows we start by verifying that Core Audio is supported. If not,
  // the WaveIn API is used and we might as well avoid all audio conversions
  // since WaveIn does the conversion for us.
  // TODO(henrika): this code should be moved to platform dependent audio
  // managers.
  bool use_native_audio_params = true;
#if defined(OS_WIN)
  use_native_audio_params = media::CoreAudioUtil::IsSupported();
  DVLOG_IF(1, !use_native_audio_params) << "Reverting to WaveIn for WebSpeech";
#endif

  AudioParameters input_parameters = output_parameters;
  if (use_native_audio_params && !unit_test_is_active) {
    // Use native audio parameters but avoid opening up at the native buffer
    // size. Instead use the same frame size (in milliseconds) as WebSpeech
    // uses. We rely on internal buffers in the audio back-end to fulfill this
    // request and the idea is to simplify the audio conversion since each
    // Convert() call will then render exactly one ProvideInput() call.
    // Due to implementation details in the audio converter, 2 milliseconds
    // are added to the default frame size (100 ms) to ensure there is enough
    // data to generate 100 ms of output when resampling.
    frames_per_buffer =
        ((in_params.sample_rate() * (chunk_duration_ms + 2)) / 1000.0) + 0.5;
    input_parameters.Reset(in_params.format(),
                           in_params.channel_layout(),
                           in_params.channels(),
                           in_params.sample_rate(),
                           in_params.bits_per_sample(),
                           frames_per_buffer);
  }

  // Create an audio converter which converts data between the native input
  // format and the WebSpeech-specific output format.
  audio_converter_.reset(
      new OnDataConverter(input_parameters, output_parameters));

  audio_controller_ = AudioInputController::Create(
      audio_manager, this, input_parameters, device_id_, NULL);

  if (!audio_controller_.get()) {
    return Abort(
        SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE));
  }

  audio_log_->OnCreated(0, input_parameters, device_id_);

  // The endpointer needs to estimate the environment/background noise before
  // starting to treat the audio as user input. We wait in the state
  // ESTIMATING_ENVIRONMENT until such an interval has elapsed before switching
  // to user input mode.
  endpointer_.SetEnvironmentEstimationMode();
  audio_controller_->Record();
  audio_log_->OnStarted(0);
  return STATE_STARTING;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StartRecognitionEngine(const FSMEventArgs& event_args) {
  // This is the first audio packet captured, so the recognition engine is
  // started and the delegate notified about the event.
  DCHECK(recognition_engine_.get() != NULL);
  recognition_engine_->StartRecognition();
  listener()->OnAudioStart(session_id());

  // This is a little hack, since TakeAudioChunk() is already called by
  // ProcessAudioPipeline(). It is the best tradeoff, unless we allow dropping
  // the first audio chunk captured after opening the audio device.
  recognition_engine_->TakeAudioChunk(*(event_args.audio_data.get()));
  return STATE_ESTIMATING_ENVIRONMENT;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::WaitEnvironmentEstimationCompletion(const FSMEventArgs&) {
  DCHECK(endpointer_.IsEstimatingEnvironment());
  if (GetElapsedTimeMs() >= kEndpointerEstimationTimeMs) {
    endpointer_.SetUserInputMode();
    listener()->OnEnvironmentEstimationComplete(session_id());
    return STATE_WAITING_FOR_SPEECH;
  } else {
    return STATE_ESTIMATING_ENVIRONMENT;
  }
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DetectUserSpeechOrTimeout(const FSMEventArgs&) {
  if (endpointer_.DidStartReceivingSpeech()) {
    listener()->OnSoundStart(session_id());
    return STATE_RECOGNIZING;
  } else if (GetElapsedTimeMs() >= kNoSpeechTimeoutMs) {
    return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NO_SPEECH));
  }
  return STATE_WAITING_FOR_SPEECH;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DetectEndOfSpeech(const FSMEventArgs& event_args) {
  if (endpointer_.speech_input_complete())
    return StopCaptureAndWaitForResult(event_args);
  return STATE_RECOGNIZING;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StopCaptureAndWaitForResult(const FSMEventArgs&) {
  DCHECK(state_ >= STATE_ESTIMATING_ENVIRONMENT && state_ <= STATE_RECOGNIZING);

  DVLOG(1) << "Concluding recognition";
  CloseAudioControllerAsynchronously();
  recognition_engine_->AudioChunksEnded();

  if (state_ > STATE_WAITING_FOR_SPEECH)
    listener()->OnSoundEnd(session_id());

  listener()->OnAudioEnd(session_id());
  return STATE_WAITING_FINAL_RESULT;
}

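// Aborts without reporting an error to the listener: Abort() skips
// OnRecognitionError() when the error code is SPEECH_RECOGNITION_ERROR_NONE.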
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::AbortSilently(const FSMEventArgs& event_args) {
  DCHECK_NE(event_args.event, EVENT_AUDIO_ERROR);
  DCHECK_NE(event_args.event, EVENT_ENGINE_ERROR);
  return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NONE));
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::AbortWithError(const FSMEventArgs& event_args) {
  if (event_args.event == EVENT_AUDIO_ERROR) {
    return Abort(
        SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE));
  } else if (event_args.event == EVENT_ENGINE_ERROR) {
    return Abort(event_args.engine_error);
  }
  return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_ABORTED));
}

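// Common teardown path: stops audio capture and the recognition engine (if
// they were started), emits the sound/audio end events still due, reports
// |error| unless it is SPEECH_RECOGNITION_ERROR_NONE, and ends the session.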
SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::Abort(
    const SpeechRecognitionError& error) {
  if (IsCapturingAudio())
    CloseAudioControllerAsynchronously();

  DVLOG(1) << "SpeechRecognizerImpl canceling recognition. ";

  // The recognition engine is initialized only after STATE_STARTING.
  if (state_ > STATE_STARTING) {
    DCHECK(recognition_engine_.get() != NULL);
    recognition_engine_->EndRecognition();
  }

  if (state_ > STATE_WAITING_FOR_SPEECH && state_ < STATE_WAITING_FINAL_RESULT)
    listener()->OnSoundEnd(session_id());

  if (state_ > STATE_STARTING && state_ < STATE_WAITING_FINAL_RESULT)
    listener()->OnAudioEnd(session_id());

  if (error.code != SPEECH_RECOGNITION_ERROR_NONE)
    listener()->OnRecognitionError(session_id(), error);

  listener()->OnRecognitionEnd(session_id());

  return STATE_ENDED;
}

SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::ProcessIntermediateResult(
    const FSMEventArgs& event_args) {
  // Provisional results can occur only if explicitly enabled in the JS API.
  DCHECK(provisional_results_);

  // In continuous recognition, intermediate results can occur even when we are
  // in the ESTIMATING_ENVIRONMENT or WAITING_FOR_SPEECH states (if the
  // recognition engine is "faster" than our endpointer). In these cases we
  // skip the endpointer and fast-forward to the RECOGNIZING state, respecting
  // the order in which events would normally be triggered.
  if (state_ == STATE_ESTIMATING_ENVIRONMENT) {
    DCHECK(endpointer_.IsEstimatingEnvironment());
    endpointer_.SetUserInputMode();
    listener()->OnEnvironmentEstimationComplete(session_id());
  } else if (state_ == STATE_WAITING_FOR_SPEECH) {
    listener()->OnSoundStart(session_id());
  } else {
    DCHECK_EQ(STATE_RECOGNIZING, state_);
  }

  listener()->OnRecognitionResults(session_id(), event_args.engine_results);
  return STATE_RECOGNIZING;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::ProcessFinalResult(const FSMEventArgs& event_args) {
  const SpeechRecognitionResults& results = event_args.engine_results;
  SpeechRecognitionResults::const_iterator i = results.begin();
  bool provisional_results_pending = false;
  bool results_are_empty = true;
  for (; i != results.end(); ++i) {
    const SpeechRecognitionResult& result = *i;
    if (result.is_provisional) {
      DCHECK(provisional_results_);
      provisional_results_pending = true;
    } else if (results_are_empty) {
      results_are_empty = result.hypotheses.empty();
    }
  }

  if (provisional_results_pending) {
    listener()->OnRecognitionResults(session_id(), results);
    // We don't end the recognition if a provisional result is received in
    // STATE_WAITING_FINAL_RESULT. A definitive result will come next and will
    // end the recognition.
    return state_;
  }

  recognition_engine_->EndRecognition();

  if (!results_are_empty) {
    // We could receive an empty result (which we won't propagate further)
    // in the following (continuous) scenario:
    //  1. The caller starts pushing audio and receives some results;
    //  2. A |StopAudioCapture| is issued later;
    //  3. The final audio frames captured in the interval ]1, 2] do not lead
    //     to any result (nor to any error);
    //  4. The speech recognition engine, therefore, emits an empty result to
    //     notify that the recognition ended with no error, yet without any
    //     further result.
    listener()->OnRecognitionResults(session_id(), results);
  }

  listener()->OnRecognitionEnd(session_id());
  return STATE_ENDED;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DoNothing(const FSMEventArgs&) const {
  return state_;  // Just keep the current state.
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::NotFeasible(const FSMEventArgs& event_args) {
  NOTREACHED() << "Unfeasible event " << event_args.event
               << " in state " << state_;
  return state_;
}

void SpeechRecognizerImpl::CloseAudioControllerAsynchronously() {
  DCHECK(IsCapturingAudio());
  DVLOG(1) << "SpeechRecognizerImpl closing audio controller.";
  // Issues a Close on the audio controller, passing an empty callback. The
  // only purpose of such callback is to keep the audio controller refcounted
  // until Close has completed (in the audio thread) and automatically destroy
  // it afterwards (upon return from OnAudioClosed).
  audio_controller_->Close(base::Bind(&SpeechRecognizerImpl::OnAudioClosed,
                                      this, audio_controller_));
  audio_controller_ = NULL;  // The controller is still refcounted by Bind.
  audio_log_->OnClosed(0);
}

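// Derives the elapsed capture time from the number of samples recorded so far
// at kAudioSampleRate.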
int SpeechRecognizerImpl::GetElapsedTimeMs() const {
  return (num_samples_recorded_ * 1000) / kAudioSampleRate;
}

void SpeechRecognizerImpl::UpdateSignalAndNoiseLevels(const float& rms,
                                                      bool clip_detected) {
  // Calculate the input volume to display in the UI, smoothing towards the
  // new level.
  // TODO(primiano): Do we really need all this floating point arith here?
  // Perhaps it might be quite expensive on mobile.
  float level = (rms - kAudioMeterMinDb) /
      (kAudioMeterDbRange / kAudioMeterRangeMaxUnclipped);
  level = std::min(std::max(0.0f, level), kAudioMeterRangeMaxUnclipped);
  const float smoothing_factor = (level > audio_level_) ? kUpSmoothingFactor :
                                                          kDownSmoothingFactor;
  audio_level_ += (level - audio_level_) * smoothing_factor;

  float noise_level = (endpointer_.NoiseLevelDb() - kAudioMeterMinDb) /
      (kAudioMeterDbRange / kAudioMeterRangeMaxUnclipped);
  noise_level = std::min(std::max(0.0f, noise_level),
                         kAudioMeterRangeMaxUnclipped);

  listener()->OnAudioLevelsChange(
      session_id(), clip_detected ? 1.0f : audio_level_, noise_level);
}

void SpeechRecognizerImpl::SetAudioManagerForTesting(
    AudioManager* audio_manager) {
  audio_manager_for_tests_ = audio_manager;
}

SpeechRecognizerImpl::FSMEventArgs::FSMEventArgs(FSMEvent event_value)
    : event(event_value),
      audio_data(NULL),
      engine_error(SPEECH_RECOGNITION_ERROR_NONE) {
}

SpeechRecognizerImpl::FSMEventArgs::~FSMEventArgs() {
}

}  // namespace content