// NOTE: scrape provenance (not part of the original source file):
// [AndroidWebViewShell] Add MediaStream API layout tests.
// chromium-blink-merge.git / content/renderer/speech_recognition_dispatcher.h
// blob 10241956e8142013e10bdd42b48696e61f484cf5
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 #ifndef CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_
6 #define CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_
8 #include <map>
10 #include "base/basictypes.h"
11 #include "base/memory/scoped_ptr.h"
12 #include "base/memory/shared_memory.h"
13 #include "base/sync_socket.h"
14 #include "content/public/common/speech_recognition_result.h"
15 #include "content/public/renderer/render_view_observer.h"
16 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
17 #include "third_party/WebKit/public/platform/WebVector.h"
18 #include "third_party/WebKit/public/web/WebSpeechRecognitionHandle.h"
19 #include "third_party/WebKit/public/web/WebSpeechRecognizer.h"
// Forward declaration only; the full definition lives in
// media/base/audio_parameters.h and is not needed in this header.
namespace media {
class AudioParameters;
}  // namespace media
25 namespace content {
26 class RenderViewImpl;
27 #if defined(ENABLE_WEBRTC)
28 class SpeechRecognitionAudioSink;
29 #endif
30 struct SpeechRecognitionError;
31 struct SpeechRecognitionResult;
33 // SpeechRecognitionDispatcher is a delegate for methods used by WebKit for
34 // scripted JS speech APIs. It's the complement of
35 // SpeechRecognitionDispatcherHost (owned by RenderViewHost).
36 class SpeechRecognitionDispatcher : public RenderViewObserver,
37 public blink::WebSpeechRecognizer {
38 public:
39 explicit SpeechRecognitionDispatcher(RenderViewImpl* render_view);
40 virtual ~SpeechRecognitionDispatcher();
42 // Aborts all speech recognitions.
43 void AbortAllRecognitions();
45 private:
46 // RenderViewObserver implementation.
47 bool OnMessageReceived(const IPC::Message& message) override;
49 // blink::WebSpeechRecognizer implementation.
50 virtual void start(const blink::WebSpeechRecognitionHandle&,
51 const blink::WebSpeechRecognitionParams&,
52 blink::WebSpeechRecognizerClient*);
53 virtual void stop(const blink::WebSpeechRecognitionHandle&,
54 blink::WebSpeechRecognizerClient*);
55 virtual void abort(const blink::WebSpeechRecognitionHandle&,
56 blink::WebSpeechRecognizerClient*);
58 void OnRecognitionStarted(int request_id);
59 void OnAudioStarted(int request_id);
60 void OnSoundStarted(int request_id);
61 void OnSoundEnded(int request_id);
62 void OnAudioEnded(int request_id);
63 void OnErrorOccurred(int request_id, const SpeechRecognitionError& error);
64 void OnRecognitionEnded(int request_id);
65 void OnResultsRetrieved(int request_id,
66 const SpeechRecognitionResults& result);
67 void OnAudioReceiverReady(int session_id,
68 const media::AudioParameters& params,
69 const base::SharedMemoryHandle handle,
70 const base::SyncSocket::TransitDescriptor socket);
72 void ResetAudioSink();
74 int GetOrCreateIDForHandle(const blink::WebSpeechRecognitionHandle& handle);
75 bool HandleExists(const blink::WebSpeechRecognitionHandle& handle);
76 const blink::WebSpeechRecognitionHandle& GetHandleFromID(int handle_id);
78 // The WebKit client class that we use to send events back to the JS world.
79 blink::WebSpeechRecognizerClient* recognizer_client_;
81 #if defined(ENABLE_WEBRTC)
82 // Media stream audio track that the speech recognition connects to.
83 // Accessed on the render thread.
84 blink::WebMediaStreamTrack audio_track_;
86 // Audio sink used to provide audio from the track.
87 scoped_ptr<SpeechRecognitionAudioSink> speech_audio_sink_;
88 #endif
90 typedef std::map<int, blink::WebSpeechRecognitionHandle> HandleMap;
91 HandleMap handle_map_;
92 int next_id_;
94 DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionDispatcher);
97 } // namespace content
99 #endif // CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_