content/browser/speech/speech_recognizer_impl.cc
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/speech/speech_recognizer_impl.h"

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/time/time.h"
#include "content/browser/browser_main_loop.h"
#include "content/browser/media/media_internals.h"
#include "content/browser/speech/audio_buffer.h"
#include "content/browser/speech/google_one_shot_remote_engine.h"
#include "content/public/browser/speech_recognition_event_listener.h"
#include "media/base/audio_converter.h"

#if defined(OS_WIN)
#include "media/audio/win/core_audio_util_win.h"
#endif

using media::AudioBus;
using media::AudioConverter;
using media::AudioInputController;
using media::AudioManager;
using media::AudioParameters;
using media::ChannelLayout;

namespace content {

// Private class which encapsulates the audio converter and the
// AudioConverter::InputCallback. It handles resampling, buffering and
// channel mixing between input and output parameters.
class SpeechRecognizerImpl::OnDataConverter
    : public media::AudioConverter::InputCallback {
 public:
  OnDataConverter(const AudioParameters& input_params,
                  const AudioParameters& output_params);
  ~OnDataConverter() override;

  // Converts input audio |data| bus into an AudioChunk where the input format
  // is given by |input_parameters_| and the output format by
  // |output_parameters_|.
  scoped_refptr<AudioChunk> Convert(const AudioBus* data);

  bool data_was_converted() const { return data_was_converted_; }

 private:
  // media::AudioConverter::InputCallback implementation.
  double ProvideInput(AudioBus* dest, base::TimeDelta buffer_delay) override;

  // Handles resampling, buffering, and channel mixing between input and output
  // parameters.
  AudioConverter audio_converter_;

  scoped_ptr<AudioBus> input_bus_;
  scoped_ptr<AudioBus> output_bus_;
  const AudioParameters input_parameters_;
  const AudioParameters output_parameters_;
  bool data_was_converted_;

  DISALLOW_COPY_AND_ASSIGN(OnDataConverter);
};

namespace {

// The following constants are related to the volume level indicator shown in
// the UI for recorded audio.
// Multiplier used when the new volume is greater than the previous level.
const float kUpSmoothingFactor = 1.0f;
// Multiplier used when the new volume is less than the previous level.
const float kDownSmoothingFactor = 0.7f;
// RMS dB value of a maximum (unclipped) sine wave for int16 samples.
const float kAudioMeterMaxDb = 90.31f;
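// (Numerically, 90.31 equals 20 * log10(2^15), the dB value of a full-scale
// int16 amplitude; noted here only as an aid for reading the meter math.)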
// This value corresponds to RMS dB for int16 with 6 most-significant-bits = 0.
// Values lower than this will display as an empty level meter.
const float kAudioMeterMinDb = 30.0f;
const float kAudioMeterDbRange = kAudioMeterMaxDb - kAudioMeterMinDb;

// Maximum level to draw to display unclipped meter. (1.0f displays clipping.)
const float kAudioMeterRangeMaxUnclipped = 47.0f / 48.0f;

// Returns true if more than 5% of the samples are at min or max value.
bool DetectClipping(const AudioChunk& chunk) {
  const int num_samples = chunk.NumSamples();
  const int16* samples = chunk.SamplesData16();
  const int kThreshold = num_samples / 20;
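  // For example, a hypothetical 1600-sample chunk (100 ms at 16 kHz mono)
  // would tolerate up to 80 clipped samples before being flagged.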
  int clipping_samples = 0;

  for (int i = 0; i < num_samples; ++i) {
    if (samples[i] <= -32767 || samples[i] >= 32767) {
      if (++clipping_samples > kThreshold)
        return true;
    }
  }
  return false;
}

void KeepAudioControllerRefcountedForDtor(scoped_refptr<AudioInputController>) {
}

}  // namespace

const int SpeechRecognizerImpl::kAudioSampleRate = 16000;
const ChannelLayout SpeechRecognizerImpl::kChannelLayout =
    media::CHANNEL_LAYOUT_MONO;
const int SpeechRecognizerImpl::kNumBitsPerAudioSample = 16;
const int SpeechRecognizerImpl::kNoSpeechTimeoutMs = 8000;
const int SpeechRecognizerImpl::kEndpointerEstimationTimeMs = 300;
media::AudioManager* SpeechRecognizerImpl::audio_manager_for_tests_ = NULL;

static_assert(SpeechRecognizerImpl::kNumBitsPerAudioSample % 8 == 0,
              "kNumBitsPerAudioSample must be a multiple of 8");

// SpeechRecognizerImpl::OnDataConverter implementation

SpeechRecognizerImpl::OnDataConverter::OnDataConverter(
    const AudioParameters& input_params,
    const AudioParameters& output_params)
    : audio_converter_(input_params, output_params, false),
      input_bus_(AudioBus::Create(input_params)),
      output_bus_(AudioBus::Create(output_params)),
      input_parameters_(input_params),
      output_parameters_(output_params),
      data_was_converted_(false) {
  audio_converter_.AddInput(this);
  audio_converter_.PrimeWithSilence();
}

SpeechRecognizerImpl::OnDataConverter::~OnDataConverter() {
  // It should now be safe to unregister the converter since no more OnData()
  // callbacks are outstanding at this point.
  audio_converter_.RemoveInput(this);
}

scoped_refptr<AudioChunk> SpeechRecognizerImpl::OnDataConverter::Convert(
    const AudioBus* data) {
  CHECK_EQ(data->frames(), input_parameters_.frames_per_buffer());
  data_was_converted_ = false;
  // Copy recorded audio to the |input_bus_| for later use in ProvideInput().
  data->CopyTo(input_bus_.get());
  // Convert the audio and place the result in |output_bus_|. This call will
  // result in a ProvideInput() callback where the actual input is provided.
  // However, it can happen that the converter contains enough cached data
  // to return a result without calling ProvideInput(). The caller of this
  // method should check the state of data_was_converted() and make an
  // additional call if it is still false on return.
  // See http://crbug.com/506051 for details.
  audio_converter_.Convert(output_bus_.get());
  // Create an audio chunk based on the converted result.
  scoped_refptr<AudioChunk> chunk(
      new AudioChunk(output_parameters_.GetBytesPerBuffer(),
                     output_parameters_.bits_per_sample() / 8));
  output_bus_->ToInterleaved(output_bus_->frames(),
                             output_parameters_.bits_per_sample() / 8,
                             chunk->writable_data());
  return chunk;
}

double SpeechRecognizerImpl::OnDataConverter::ProvideInput(
    AudioBus* dest, base::TimeDelta buffer_delay) {
  // Read from the input bus to feed the converter.
  input_bus_->CopyTo(dest);
  // Indicate that the recorded audio has in fact been used by the converter.
  data_was_converted_ = true;
  return 1;
}

// SpeechRecognizerImpl implementation

SpeechRecognizerImpl::SpeechRecognizerImpl(
    SpeechRecognitionEventListener* listener,
    int session_id,
    bool continuous,
    bool provisional_results,
    SpeechRecognitionEngine* engine)
    : SpeechRecognizer(listener, session_id),
      recognition_engine_(engine),
      endpointer_(kAudioSampleRate),
      audio_log_(MediaInternals::GetInstance()->CreateAudioLog(
          media::AudioLogFactory::AUDIO_INPUT_CONTROLLER)),
      is_dispatching_event_(false),
      provisional_results_(provisional_results),
      state_(STATE_IDLE) {
  DCHECK(recognition_engine_ != NULL);
  if (!continuous) {
    // In single-shot (non-continuous) recognition,
    // the session is automatically ended after:
    // - 0.5 seconds of silence if time < 3 seconds
    // - 1 second of silence if time >= 3 seconds
    endpointer_.set_speech_input_complete_silence_length(
        base::Time::kMicrosecondsPerSecond / 2);
    endpointer_.set_long_speech_input_complete_silence_length(
        base::Time::kMicrosecondsPerSecond);
    endpointer_.set_long_speech_length(3 * base::Time::kMicrosecondsPerSecond);
  } else {
    // In continuous recognition, the session is automatically ended after 15
    // seconds of silence.
    const int64 cont_timeout_us = base::Time::kMicrosecondsPerSecond * 15;
    endpointer_.set_speech_input_complete_silence_length(cont_timeout_us);
    endpointer_.set_long_speech_length(0);  // Use only a single timeout.
  }
  endpointer_.StartSession();
  recognition_engine_->set_delegate(this);
}

// -------  Methods that trigger Finite State Machine (FSM) events ------------

// NOTE: All the external events and requests should be enqueued (PostTask),
// even if they come from the same (IO) thread, in order to preserve the
// relationship of causality between events and avoid interleaved event
// processing due to synchronous callbacks.

void SpeechRecognizerImpl::StartRecognition(const std::string& device_id) {
  DCHECK(!device_id.empty());
  device_id_ = device_id;

  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, FSMEventArgs(EVENT_START)));
}

void SpeechRecognizerImpl::AbortRecognition() {
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, FSMEventArgs(EVENT_ABORT)));
}

void SpeechRecognizerImpl::StopAudioCapture() {
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, FSMEventArgs(EVENT_STOP_CAPTURE)));
}

bool SpeechRecognizerImpl::IsActive() const {
  // Checking the FSM state from another thread (thus, while the FSM is
  // potentially concurrently evolving) is meaningless.
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  return state_ != STATE_IDLE && state_ != STATE_ENDED;
}

bool SpeechRecognizerImpl::IsCapturingAudio() const {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);  // See IsActive().
  const bool is_capturing_audio = state_ >= STATE_STARTING &&
                                  state_ <= STATE_RECOGNIZING;
  DCHECK((is_capturing_audio && (audio_controller_.get() != NULL)) ||
         (!is_capturing_audio && audio_controller_.get() == NULL));
  return is_capturing_audio;
}

const SpeechRecognitionEngine&
SpeechRecognizerImpl::recognition_engine() const {
  return *(recognition_engine_.get());
}

SpeechRecognizerImpl::~SpeechRecognizerImpl() {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  endpointer_.EndSession();
  if (audio_controller_.get()) {
    audio_controller_->Close(
        base::Bind(&KeepAudioControllerRefcountedForDtor, audio_controller_));
    audio_log_->OnClosed(0);
  }
}

// Invoked in the audio thread.
void SpeechRecognizerImpl::OnError(AudioInputController* controller,
    media::AudioInputController::ErrorCode error_code) {
  FSMEventArgs event_args(EVENT_AUDIO_ERROR);
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

void SpeechRecognizerImpl::OnData(AudioInputController* controller,
                                  const AudioBus* data) {
  // Convert audio from the native format to the fixed format used by
  // WebSpeech.
  FSMEventArgs event_args(EVENT_AUDIO_DATA);
  event_args.audio_data = audio_converter_->Convert(data);
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
  // See http://crbug.com/506051 regarding why one extra Convert() call can
  // sometimes be required. It should be a rare case.
  if (!audio_converter_->data_was_converted()) {
    event_args.audio_data = audio_converter_->Convert(data);
    BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                            base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                       this, event_args));
  }

  // Something is seriously wrong here and we are most likely missing some
  // audio segments.
  CHECK(audio_converter_->data_was_converted());
}

void SpeechRecognizerImpl::OnAudioClosed(AudioInputController*) {}

void SpeechRecognizerImpl::OnSpeechRecognitionEngineResults(
    const SpeechRecognitionResults& results) {
  FSMEventArgs event_args(EVENT_ENGINE_RESULT);
  event_args.engine_results = results;
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

void SpeechRecognizerImpl::OnSpeechRecognitionEngineError(
    const SpeechRecognitionError& error) {
  FSMEventArgs event_args(EVENT_ENGINE_ERROR);
  event_args.engine_error = error;
  BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
                          base::Bind(&SpeechRecognizerImpl::DispatchEvent,
                                     this, event_args));
}

// -----------------------  Core FSM implementation ---------------------------
// TODO(primiano): After the changes in the media package (r129173), this class
// slightly violates the SpeechRecognitionEventListener interface contract. In
// particular, it is no longer true that this class can be freed after the
// OnRecognitionEnd event, since the asynchronous audio_controller_.Close()
// call can still be in progress after the end event. Currently, this is not a
// problem for the browser itself, since refcounting protects us against such
// race conditions. However, we should fix this in upcoming CLs. For instance,
// tests currently work only because TestAudioInputController does not close
// asynchronously as the real controller does; they will become flaky once
// TestAudioInputController is fixed.

void SpeechRecognizerImpl::DispatchEvent(const FSMEventArgs& event_args) {
  DCHECK_CURRENTLY_ON(BrowserThread::IO);
  DCHECK_LE(event_args.event, EVENT_MAX_VALUE);
  DCHECK_LE(state_, STATE_MAX_VALUE);

  // Event dispatching must be sequential, otherwise it will break all the
  // rules and the assumptions of the finite state automata model.
  DCHECK(!is_dispatching_event_);
  is_dispatching_event_ = true;

  // Guard against the delegate freeing us until we finish processing the
  // event.
  scoped_refptr<SpeechRecognizerImpl> me(this);

  if (event_args.event == EVENT_AUDIO_DATA) {
    DCHECK(event_args.audio_data.get() != NULL);
    ProcessAudioPipeline(*event_args.audio_data.get());
  }

  // The audio pipeline must be processed before the event dispatch, otherwise
  // it would take actions according to the future state instead of the
  // current one.
  state_ = ExecuteTransitionAndGetNextState(event_args);
  is_dispatching_event_ = false;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::ExecuteTransitionAndGetNextState(
    const FSMEventArgs& event_args) {
  const FSMEvent event = event_args.event;
  switch (state_) {
    case STATE_IDLE:
      switch (event) {
        // TODO(primiano): restore UNREACHABLE_CONDITION on EVENT_ABORT and
        // EVENT_STOP_CAPTURE below once speech input extensions are fixed.
        case EVENT_ABORT:
          return AbortSilently(event_args);
        case EVENT_START:
          return StartRecording(event_args);
        case EVENT_STOP_CAPTURE:
          return AbortSilently(event_args);
        case EVENT_AUDIO_DATA:     // Corner cases related to queued messages
        case EVENT_ENGINE_RESULT:  // being dispatched late.
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return DoNothing(event_args);
      }
      break;
    case STATE_STARTING:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return AbortSilently(event_args);
        case EVENT_AUDIO_DATA:
          return StartRecognitionEngine(event_args);
        case EVENT_ENGINE_RESULT:
          return NotFeasible(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_ESTIMATING_ENVIRONMENT:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return StopCaptureAndWaitForResult(event_args);
        case EVENT_AUDIO_DATA:
          return WaitEnvironmentEstimationCompletion(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessIntermediateResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_WAITING_FOR_SPEECH:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return StopCaptureAndWaitForResult(event_args);
        case EVENT_AUDIO_DATA:
          return DetectUserSpeechOrTimeout(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessIntermediateResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_RECOGNIZING:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
          return StopCaptureAndWaitForResult(event_args);
        case EVENT_AUDIO_DATA:
          return DetectEndOfSpeech(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessIntermediateResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;
    case STATE_WAITING_FINAL_RESULT:
      switch (event) {
        case EVENT_ABORT:
          return AbortWithError(event_args);
        case EVENT_START:
          return NotFeasible(event_args);
        case EVENT_STOP_CAPTURE:
        case EVENT_AUDIO_DATA:
          return DoNothing(event_args);
        case EVENT_ENGINE_RESULT:
          return ProcessFinalResult(event_args);
        case EVENT_ENGINE_ERROR:
        case EVENT_AUDIO_ERROR:
          return AbortWithError(event_args);
      }
      break;

    // TODO(primiano): remove this state when speech input extensions support
    // is removed and STATE_IDLE.{EVENT_ABORT, EVENT_STOP_CAPTURE} are reset
    // to NotFeasible (see the TODO above).
    case STATE_ENDED:
      return DoNothing(event_args);
  }
  return NotFeasible(event_args);
}

// ----------- Contract for all the FSM evolution functions below -------------
//  - Are guaranteed to be executed in the IO thread;
//  - Are guaranteed to be not reentrant (themselves and each other);
//  - event_args members are guaranteed to be stable during the call;
//  - The class won't be freed in the meanwhile due to callbacks;
//  - IsCapturingAudio() returns true if and only if audio_controller_ != NULL.

// TODO(primiano): the audio pipeline is currently serial. However, the
// clipper->endpointer->vumeter chain and the sr_engine could be parallelized.
// We should profile the execution to see whether it would be worthwhile.
void SpeechRecognizerImpl::ProcessAudioPipeline(const AudioChunk& raw_audio) {
  const bool route_to_endpointer = state_ >= STATE_ESTIMATING_ENVIRONMENT &&
                                   state_ <= STATE_RECOGNIZING;
  const bool route_to_sr_engine = route_to_endpointer;
  const bool route_to_vumeter = state_ >= STATE_WAITING_FOR_SPEECH &&
                                state_ <= STATE_RECOGNIZING;
  const bool clip_detected = DetectClipping(raw_audio);
  float rms = 0.0f;

  num_samples_recorded_ += raw_audio.NumSamples();

  if (route_to_endpointer)
    endpointer_.ProcessAudio(raw_audio, &rms);

  if (route_to_vumeter) {
    DCHECK(route_to_endpointer);  // Depends on endpointer due to |rms|.
    UpdateSignalAndNoiseLevels(rms, clip_detected);
  }
  if (route_to_sr_engine) {
    DCHECK(recognition_engine_.get() != NULL);
    recognition_engine_->TakeAudioChunk(raw_audio);
  }
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StartRecording(const FSMEventArgs&) {
  DCHECK(recognition_engine_.get() != NULL);
  DCHECK(!IsCapturingAudio());
  const bool unit_test_is_active = (audio_manager_for_tests_ != NULL);
  AudioManager* audio_manager = unit_test_is_active ?
                                audio_manager_for_tests_ :
                                AudioManager::Get();
  DCHECK(audio_manager != NULL);

  DVLOG(1) << "SpeechRecognizerImpl starting audio capture.";
  num_samples_recorded_ = 0;
  audio_level_ = 0;
  listener()->OnRecognitionStart(session_id());

  // TODO(xians): Check if the OS has the device with |device_id_|, return
  // |SPEECH_AUDIO_ERROR_DETAILS_NO_MIC| if the target device does not exist.
  if (!audio_manager->HasAudioInputDevices()) {
    return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE,
                                        SPEECH_AUDIO_ERROR_DETAILS_NO_MIC));
  }

  int chunk_duration_ms = recognition_engine_->GetDesiredAudioChunkDurationMs();

  AudioParameters in_params = audio_manager->GetInputStreamParameters(
      device_id_);
  if (!in_params.IsValid() && !unit_test_is_active) {
    DLOG(ERROR) << "Invalid native audio input parameters";
    return Abort(
        SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE));
  }

  // The audio converter shall provide audio based on these parameters as
  // output. Hard-coded, WebSpeech-specific parameters are used here.
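  // For instance, with a hypothetical chunk duration of 100 ms this yields
  // 16000 * 100 / 1000 = 1600 frames per buffer at the fixed 16 kHz rate.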
  int frames_per_buffer = (kAudioSampleRate * chunk_duration_ms) / 1000;
  AudioParameters output_parameters = AudioParameters(
      AudioParameters::AUDIO_PCM_LOW_LATENCY, kChannelLayout, kAudioSampleRate,
      kNumBitsPerAudioSample, frames_per_buffer);
  DVLOG(1) << "SRI::output_parameters: "
           << output_parameters.AsHumanReadableString();

  // The audio converter will receive audio based on these parameters as input.
  // On Windows we start by verifying that Core Audio is supported. If not,
  // the WaveIn API is used and we might as well avoid all audio conversions
  // since WaveIn does the conversion for us.
  // TODO(henrika): this code should be moved to platform-dependent audio
  // managers.
  bool use_native_audio_params = true;
#if defined(OS_WIN)
  use_native_audio_params = media::CoreAudioUtil::IsSupported();
  DVLOG_IF(1, !use_native_audio_params) << "Reverting to WaveIn for WebSpeech";
#endif

  AudioParameters input_parameters = output_parameters;
  if (use_native_audio_params && !unit_test_is_active) {
    // Use native audio parameters but avoid opening up at the native buffer
    // size. Instead use same frame size (in milliseconds) as WebSpeech uses.
    // We rely on internal buffers in the audio back-end to fulfill this request
    // and the idea is to simplify the audio conversion since each Convert()
    // call will then render exactly one ProvideInput() call.
    // in_params.sample_rate()
    input_parameters = in_params;
    frames_per_buffer =
        ((in_params.sample_rate() * chunk_duration_ms) / 1000.0) + 0.5;
    input_parameters.set_frames_per_buffer(frames_per_buffer);
    DVLOG(1) << "SRI::input_parameters: "
             << input_parameters.AsHumanReadableString();
  }

  // Create an audio converter which converts data between the native input
  // format and the WebSpeech-specific output format.
  audio_converter_.reset(
      new OnDataConverter(input_parameters, output_parameters));

  audio_controller_ = AudioInputController::Create(
      audio_manager, this, input_parameters, device_id_, NULL);

  if (!audio_controller_.get()) {
    return Abort(
        SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE));
  }

  audio_log_->OnCreated(0, input_parameters, device_id_);

  // The endpointer needs to estimate the environment/background noise before
  // starting to treat the audio as user input. We wait in the state
  // ESTIMATING_ENVIRONMENT until such an interval has elapsed before switching
  // to user input mode.
  endpointer_.SetEnvironmentEstimationMode();
  audio_controller_->Record();
  audio_log_->OnStarted(0);
  return STATE_STARTING;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StartRecognitionEngine(const FSMEventArgs& event_args) {
  // This is the first audio packet captured, so the recognition engine is
  // started and the delegate notified about the event.
  DCHECK(recognition_engine_.get() != NULL);
  recognition_engine_->StartRecognition();
  listener()->OnAudioStart(session_id());

  // This is a little hack, since TakeAudioChunk() is already called by
  // ProcessAudioPipeline(). It is the best tradeoff, unless we allow dropping
  // the first audio chunk captured after opening the audio device.
  recognition_engine_->TakeAudioChunk(*(event_args.audio_data.get()));
  return STATE_ESTIMATING_ENVIRONMENT;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::WaitEnvironmentEstimationCompletion(const FSMEventArgs&) {
  DCHECK(endpointer_.IsEstimatingEnvironment());
  if (GetElapsedTimeMs() >= kEndpointerEstimationTimeMs) {
    endpointer_.SetUserInputMode();
    listener()->OnEnvironmentEstimationComplete(session_id());
    return STATE_WAITING_FOR_SPEECH;
  } else {
    return STATE_ESTIMATING_ENVIRONMENT;
  }
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DetectUserSpeechOrTimeout(const FSMEventArgs&) {
  if (endpointer_.DidStartReceivingSpeech()) {
    listener()->OnSoundStart(session_id());
    return STATE_RECOGNIZING;
  } else if (GetElapsedTimeMs() >= kNoSpeechTimeoutMs) {
    return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NO_SPEECH));
  }
  return STATE_WAITING_FOR_SPEECH;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DetectEndOfSpeech(const FSMEventArgs& event_args) {
  if (endpointer_.speech_input_complete())
    return StopCaptureAndWaitForResult(event_args);
  return STATE_RECOGNIZING;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StopCaptureAndWaitForResult(const FSMEventArgs&) {
  DCHECK(state_ >= STATE_ESTIMATING_ENVIRONMENT && state_ <= STATE_RECOGNIZING);

  DVLOG(1) << "Concluding recognition";
  CloseAudioControllerAsynchronously();
  recognition_engine_->AudioChunksEnded();

  if (state_ > STATE_WAITING_FOR_SPEECH)
    listener()->OnSoundEnd(session_id());

  listener()->OnAudioEnd(session_id());
  return STATE_WAITING_FINAL_RESULT;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::AbortSilently(const FSMEventArgs& event_args) {
  DCHECK_NE(event_args.event, EVENT_AUDIO_ERROR);
  DCHECK_NE(event_args.event, EVENT_ENGINE_ERROR);
  return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NONE));
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::AbortWithError(const FSMEventArgs& event_args) {
  if (event_args.event == EVENT_AUDIO_ERROR) {
    return Abort(
        SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO_CAPTURE));
  } else if (event_args.event == EVENT_ENGINE_ERROR) {
    return Abort(event_args.engine_error);
  }
  return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_ABORTED));
}

SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::Abort(
    const SpeechRecognitionError& error) {
  if (IsCapturingAudio())
    CloseAudioControllerAsynchronously();

  DVLOG(1) << "SpeechRecognizerImpl canceling recognition.";

  // The recognition engine is initialized only after STATE_STARTING.
  if (state_ > STATE_STARTING) {
    DCHECK(recognition_engine_.get() != NULL);
    recognition_engine_->EndRecognition();
  }

  if (state_ > STATE_WAITING_FOR_SPEECH && state_ < STATE_WAITING_FINAL_RESULT)
    listener()->OnSoundEnd(session_id());

  if (state_ > STATE_STARTING && state_ < STATE_WAITING_FINAL_RESULT)
    listener()->OnAudioEnd(session_id());

  if (error.code != SPEECH_RECOGNITION_ERROR_NONE)
    listener()->OnRecognitionError(session_id(), error);

  listener()->OnRecognitionEnd(session_id());

  return STATE_ENDED;
}

SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::ProcessIntermediateResult(
    const FSMEventArgs& event_args) {
  // Provisional results can occur only if explicitly enabled in the JS API.
  DCHECK(provisional_results_);

  // In continuous recognition, intermediate results can occur even when we are
  // in the ESTIMATING_ENVIRONMENT or WAITING_FOR_SPEECH states (if the
  // recognition engine is "faster" than our endpointer). In these cases we
  // skip the endpointer and fast-forward to the RECOGNIZING state, while
  // respecting the event triggering order.
  if (state_ == STATE_ESTIMATING_ENVIRONMENT) {
    DCHECK(endpointer_.IsEstimatingEnvironment());
    endpointer_.SetUserInputMode();
    listener()->OnEnvironmentEstimationComplete(session_id());
  } else if (state_ == STATE_WAITING_FOR_SPEECH) {
    listener()->OnSoundStart(session_id());
  } else {
    DCHECK_EQ(STATE_RECOGNIZING, state_);
  }

  listener()->OnRecognitionResults(session_id(), event_args.engine_results);
  return STATE_RECOGNIZING;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::ProcessFinalResult(const FSMEventArgs& event_args) {
  const SpeechRecognitionResults& results = event_args.engine_results;
  SpeechRecognitionResults::const_iterator i = results.begin();
  bool provisional_results_pending = false;
  bool results_are_empty = true;
  for (; i != results.end(); ++i) {
    const SpeechRecognitionResult& result = *i;
    if (result.is_provisional) {
      DCHECK(provisional_results_);
      provisional_results_pending = true;
    } else if (results_are_empty) {
      results_are_empty = result.hypotheses.empty();
    }
  }

  if (provisional_results_pending) {
    listener()->OnRecognitionResults(session_id(), results);
    // We don't end the recognition if a provisional result is received in
    // STATE_WAITING_FINAL_RESULT. A definitive result will come next and will
    // end the recognition.
    return state_;
  }

  recognition_engine_->EndRecognition();

  if (!results_are_empty) {
    // We could receive an empty result (which we won't propagate further)
    // in the following (continuous) scenario:
    // 1. The caller starts pushing audio and receives some results;
    // 2. A |StopAudioCapture| is issued later;
    // 3. The final audio frames captured in the interval ]1,2] do not lead to
    //    any result (nor any error);
    // 4. The speech recognition engine, therefore, emits an empty result to
    //    notify that the recognition has ended with no error and no further
    //    results.
    listener()->OnRecognitionResults(session_id(), results);
  }

  listener()->OnRecognitionEnd(session_id());
  return STATE_ENDED;
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DoNothing(const FSMEventArgs&) const {
  return state_;  // Just keep the current state.
}

SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::NotFeasible(const FSMEventArgs& event_args) {
  NOTREACHED() << "Unfeasible event " << event_args.event
               << " in state " << state_;
  return state_;
}

void SpeechRecognizerImpl::CloseAudioControllerAsynchronously() {
  DCHECK(IsCapturingAudio());
  DVLOG(1) << "SpeechRecognizerImpl closing audio controller.";
  // Issues a Close on the audio controller, passing an empty callback. The
  // only purpose of such a callback is to keep the audio controller refcounted
  // until Close has completed (in the audio thread) and automatically destroy
  // it afterwards (upon return from OnAudioClosed).
  audio_controller_->Close(base::Bind(&SpeechRecognizerImpl::OnAudioClosed,
                                      this, audio_controller_));
  audio_controller_ = NULL;  // The controller is still refcounted by Bind.
  audio_log_->OnClosed(0);
}
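
// Elapsed capture time is derived from the number of recorded samples at the
// fixed 16 kHz rate; e.g., 8000 recorded samples correspond to 500 ms.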
int SpeechRecognizerImpl::GetElapsedTimeMs() const {
  return (num_samples_recorded_ * 1000) / kAudioSampleRate;
}

void SpeechRecognizerImpl::UpdateSignalAndNoiseLevels(const float& rms,
                                                      bool clip_detected) {
  // Calculate the input volume to display in the UI, smoothing towards the
  // new level.
  // TODO(primiano): Do we really need all this floating point arithmetic here?
  // Perhaps it might be quite expensive on mobile.
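  // Worked example with the constants above: an RMS of 60 dB maps to
  // (60 - 30) / (60.31 / (47/48)) ≈ 0.49, i.e. roughly the middle of the
  // meter.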
  float level = (rms - kAudioMeterMinDb) /
      (kAudioMeterDbRange / kAudioMeterRangeMaxUnclipped);
  level = std::min(std::max(0.0f, level), kAudioMeterRangeMaxUnclipped);
  const float smoothing_factor = (level > audio_level_) ? kUpSmoothingFactor :
                                                          kDownSmoothingFactor;
  audio_level_ += (level - audio_level_) * smoothing_factor;

  float noise_level = (endpointer_.NoiseLevelDb() - kAudioMeterMinDb) /
      (kAudioMeterDbRange / kAudioMeterRangeMaxUnclipped);
  noise_level = std::min(std::max(0.0f, noise_level),
                         kAudioMeterRangeMaxUnclipped);

  listener()->OnAudioLevelsChange(
      session_id(), clip_detected ? 1.0f : audio_level_, noise_level);
}

void SpeechRecognizerImpl::SetAudioManagerForTesting(
    AudioManager* audio_manager) {
  audio_manager_for_tests_ = audio_manager;
}

SpeechRecognizerImpl::FSMEventArgs::FSMEventArgs(FSMEvent event_value)
    : event(event_value),
      audio_data(NULL),
      engine_error(SPEECH_RECOGNITION_ERROR_NONE) {
}

SpeechRecognizerImpl::FSMEventArgs::~FSMEventArgs() {
}

}  // namespace content