content/renderer/media/webrtc_local_audio_track.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/webrtc_local_audio_track.h"

#include "content/public/renderer/media_stream_audio_sink.h"
#include "content/renderer/media/media_stream_audio_sink_owner.h"
#include "content/renderer/media/media_stream_audio_track_sink.h"
#include "content/renderer/media/peer_connection_audio_sink_owner.h"
#include "content/renderer/media/webaudio_capturer_source.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_local_audio_source_provider.h"
#include "media/base/audio_fifo.h"
#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"

namespace content {

static const size_t kMaxNumberOfBuffersInFifo = 2;
static const char kAudioTrackKind[] = "audio";

namespace {

using webrtc::MediaConstraintsInterface;

// This helper function checks if any audio constraints are set that require
// audio processing to be applied. Right now this is a big, single switch for
// all of the properties, but in the future they'll be handled one by one.
bool NeedsAudioProcessing(
    const webrtc::MediaConstraintsInterface* constraints) {
  if (!constraints)
    return false;

  static const char* kAudioProcessingProperties[] = {
    MediaConstraintsInterface::kEchoCancellation,
    MediaConstraintsInterface::kExperimentalEchoCancellation,
    MediaConstraintsInterface::kAutoGainControl,
    MediaConstraintsInterface::kExperimentalAutoGainControl,
    MediaConstraintsInterface::kNoiseSuppression,
    MediaConstraintsInterface::kHighpassFilter,
    MediaConstraintsInterface::kTypingNoiseDetection,
  };

  for (size_t i = 0; i < arraysize(kAudioProcessingProperties); ++i) {
    bool value = false;
    if (webrtc::FindConstraint(constraints, kAudioProcessingProperties[i],
                               &value, NULL) &&
        value) {
      return true;
    }
  }

  return false;
}

}  // namespace

// This is a temporary audio buffer with parameters used to send data to
// callbacks.
class WebRtcLocalAudioTrack::ConfiguredBuffer {
 public:
  ConfiguredBuffer() {}
  virtual ~ConfiguredBuffer() {}

  void Configure(const media::AudioParameters& params) {
    DCHECK(params.IsValid());

    // PeerConnection uses 10ms as the sink buffer size as its native packet
    // size. We use the native PeerConnection buffer size to achieve the best
    // performance when a PeerConnection is connected with a track.
    int sink_buffer_size = params.sample_rate() / 100;
    if (params.frames_per_buffer() < sink_buffer_size) {
      // When the source is running with a buffer size smaller than the peer
      // connection buffer size, no PeerConnection is connected to the track;
      // use the same buffer size as the incoming format to avoid an extra
      // FIFO for WebAudio.
      sink_buffer_size = params.frames_per_buffer();
    }

    params_.Reset(params.format(), params.channel_layout(), params.channels(),
                  params.input_channels(), params.sample_rate(),
                  params.bits_per_sample(), sink_buffer_size);

    audio_wrapper_ = media::AudioBus::Create(params_.channels(),
                                             params_.frames_per_buffer());
    buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);

    // The size of the FIFO should be at least twice the source buffer size
    // or twice the sink buffer size.
    int buffer_size = std::max(
        kMaxNumberOfBuffersInFifo * params.frames_per_buffer(),
        kMaxNumberOfBuffersInFifo * params_.frames_per_buffer());
    fifo_.reset(new media::AudioFifo(params_.channels(), buffer_size));
  }
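
  // Sizing note (worked example): with a 48 kHz microphone source delivering
  // 10 ms buffers, the sink buffer is 48000 / 100 = 480 frames and the FIFO
  // holds 2 * 480 = 960 frames. A WebAudio source delivering 128-frame render
  // quanta is smaller than 480, so the sink buffer is clamped to 128 and no
  // extra buffering latency is introduced.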

  void Push(media::AudioBus* audio_source) {
    DCHECK(fifo_->frames() + audio_source->frames() <= fifo_->max_frames());
    fifo_->Push(audio_source);
  }

  bool Consume() {
    if (fifo_->frames() < audio_wrapper_->frames())
      return false;

    fifo_->Consume(audio_wrapper_.get(), 0, audio_wrapper_->frames());
    audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
                                  params_.bits_per_sample() / 8,
                                  buffer());
    return true;
  }

  int16* buffer() const { return buffer_.get(); }

  // Format of the output audio buffer.
  const media::AudioParameters& params() const { return params_; }

 private:
  media::AudioParameters params_;
  scoped_ptr<media::AudioBus> audio_wrapper_;
  scoped_ptr<media::AudioFifo> fifo_;
  scoped_ptr<int16[]> buffer_;
};
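
// Create() wraps the track in a talk_base::RefCountedObject, which supplies
// the AddRef()/Release() implementation required by the webrtc track
// interface, and hands the track back as a scoped_refptr.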
scoped_refptr<WebRtcLocalAudioTrack> WebRtcLocalAudioTrack::Create(
    const std::string& id,
    const scoped_refptr<WebRtcAudioCapturer>& capturer,
    WebAudioCapturerSource* webaudio_source,
    webrtc::AudioSourceInterface* track_source,
    const webrtc::MediaConstraintsInterface* constraints) {
  talk_base::RefCountedObject<WebRtcLocalAudioTrack>* track =
      new talk_base::RefCountedObject<WebRtcLocalAudioTrack>(
          id, capturer, webaudio_source, track_source, constraints);
  return track;
}

WebRtcLocalAudioTrack::WebRtcLocalAudioTrack(
    const std::string& label,
    const scoped_refptr<WebRtcAudioCapturer>& capturer,
    WebAudioCapturerSource* webaudio_source,
    webrtc::AudioSourceInterface* track_source,
    const webrtc::MediaConstraintsInterface* constraints)
    : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
      capturer_(capturer),
      webaudio_source_(webaudio_source),
      track_source_(track_source),
      need_audio_processing_(NeedsAudioProcessing(constraints)),
      buffer_(new ConfiguredBuffer()) {
  DCHECK(capturer.get() || webaudio_source);
  if (!webaudio_source_) {
    source_provider_.reset(new WebRtcLocalAudioSourceProvider());
    AddSink(source_provider_.get());
  }
  DVLOG(1) << "WebRtcLocalAudioTrack::WebRtcLocalAudioTrack()";
}

WebRtcLocalAudioTrack::~WebRtcLocalAudioTrack() {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::~WebRtcLocalAudioTrack()";
  // Users might not call Stop() on the track.
  Stop();
}

void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
                                    int audio_delay_milliseconds,
                                    int volume,
                                    bool key_pressed) {
  DCHECK(capture_thread_checker_.CalledOnValidThread());
  scoped_refptr<WebRtcAudioCapturer> capturer;
  std::vector<int> voe_channels;
  SinkList::ItemList sinks;
  SinkList::ItemList sinks_to_notify_format;
  bool is_webaudio_source = false;
  {
    base::AutoLock auto_lock(lock_);
    capturer = capturer_;
    voe_channels = voe_channels_;
    sinks = sinks_.Items();
    sinks_.RetrieveAndClearTags(&sinks_to_notify_format);
    is_webaudio_source = (webaudio_source_.get() != NULL);
  }

  // Notify the tagged sinks that the format has changed. This does nothing if
  // |sinks_to_notify_format| is empty.
  for (SinkList::ItemList::const_iterator it = sinks_to_notify_format.begin();
       it != sinks_to_notify_format.end(); ++it) {
    (*it)->OnSetFormat(buffer_->params());
  }

  // Push the data to the FIFO.
  buffer_->Push(audio_source);

  // When the source is WebAudio, turn off audio processing if the delay value
  // is 0 even though the constraint is set to true; in that case the data is
  // not coming from a microphone.
  // TODO(xians): Remove the flag when supporting one APM per audio track.
  // See crbug/264611 for details.
  bool need_audio_processing = need_audio_processing_;
  if (is_webaudio_source && need_audio_processing)
    need_audio_processing = (audio_delay_milliseconds != 0);
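
  // Drain the FIFO one sink-sized packet at a time. A sink's OnData() may
  // return a non-zero value to request a new microphone volume (e.g. from
  // automatic gain control inside WebRTC); that value is forwarded to the
  // capturer and used for the remaining packets in this callback.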
  int current_volume = volume;
  while (buffer_->Consume()) {
    // Feed the data to the sinks.
    // TODO(jiayl): We should not pass the real audio data down if the track is
    // disabled. This is currently done to feed input to the WebRTC typing
    // detection and should be changed when audio processing is moved from
    // WebRTC to the track.
    for (SinkList::ItemList::const_iterator it = sinks.begin();
         it != sinks.end();
         ++it) {
      int new_volume = (*it)->OnData(buffer_->buffer(),
                                     buffer_->params().sample_rate(),
                                     buffer_->params().channels(),
                                     buffer_->params().frames_per_buffer(),
                                     voe_channels,
                                     audio_delay_milliseconds,
                                     current_volume,
                                     need_audio_processing,
                                     key_pressed);
      if (new_volume != 0 && capturer.get()) {
        // Feed the new volume to WebRTC while changing the volume on the
        // browser side.
        capturer->SetVolume(new_volume);
        current_volume = new_volume;
      }
    }
  }
}
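
// OnSetFormat() is called by the source when capturing starts or the audio
// format changes, possibly from a brand new capture thread. Sinks are not
// notified here directly; they are tagged and receive OnSetFormat() from
// Capture() right before their first data callback in the new format.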
void WebRtcLocalAudioTrack::OnSetFormat(
    const media::AudioParameters& params) {
  DVLOG(1) << "WebRtcLocalAudioTrack::OnSetFormat()";
  // If the source is restarted, we might have changed to another capture
  // thread.
  capture_thread_checker_.DetachFromThread();
  DCHECK(capture_thread_checker_.CalledOnValidThread());

  DCHECK(params.IsValid());
  buffer_->Configure(params);

  base::AutoLock auto_lock(lock_);
  // Remember to notify all sinks of the new format.
  sinks_.TagAll();
}

void WebRtcLocalAudioTrack::AddChannel(int channel_id) {
  DVLOG(1) << "WebRtcLocalAudioTrack::AddChannel(channel_id="
           << channel_id << ")";
  base::AutoLock auto_lock(lock_);
  if (std::find(voe_channels_.begin(), voe_channels_.end(), channel_id) !=
      voe_channels_.end()) {
    // The same channel may be connected to the track more than once; ignore
    // duplicates.
    return;
  }

  voe_channels_.push_back(channel_id);
}

void WebRtcLocalAudioTrack::RemoveChannel(int channel_id) {
  DVLOG(1) << "WebRtcLocalAudioTrack::RemoveChannel(channel_id="
           << channel_id << ")";
  base::AutoLock auto_lock(lock_);
  std::vector<int>::iterator iter =
      std::find(voe_channels_.begin(), voe_channels_.end(), channel_id);
  DCHECK(iter != voe_channels_.end());
  voe_channels_.erase(iter);
}

// webrtc::AudioTrackInterface implementation.
webrtc::AudioSourceInterface* WebRtcLocalAudioTrack::GetSource() const {
  return track_source_;
}

cricket::AudioRenderer* WebRtcLocalAudioTrack::GetRenderer() {
  return this;
}

std::string WebRtcLocalAudioTrack::kind() const {
  return kAudioTrackKind;
}
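
// The track accepts two kinds of sinks: MediaStreamAudioSinks (the
// content/public interface) and PeerConnectionAudioSinks. Each is wrapped in
// its own MediaStreamAudioTrackSink owner so the rest of the track can treat
// them uniformly.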
void WebRtcLocalAudioTrack::AddSink(MediaStreamAudioSink* sink) {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
  base::AutoLock auto_lock(lock_);

  // Verify that |sink| is not already added to the list.
  DCHECK(!sinks_.Contains(
      MediaStreamAudioTrackSink::WrapsMediaStreamSink(sink)));

  // Create (and add to the list) a new MediaStreamAudioTrackSink
  // which owns the |sink| and delegates all calls to the
  // MediaStreamAudioSink interface. It will be tagged in the list, so
  // we remember to call OnSetFormat() on the new sink.
  scoped_refptr<MediaStreamAudioTrackSink> sink_owner(
      new MediaStreamAudioSinkOwner(sink));
  sinks_.AddAndTag(sink_owner);
}

void WebRtcLocalAudioTrack::RemoveSink(MediaStreamAudioSink* sink) {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::RemoveSink()";

  base::AutoLock auto_lock(lock_);

  scoped_refptr<MediaStreamAudioTrackSink> removed_item = sinks_.Remove(
      MediaStreamAudioTrackSink::WrapsMediaStreamSink(sink));

  // Clear the delegate to ensure that no more capture callbacks will
  // be sent to this sink. Also avoids a possible crash which can happen
  // if this method is called while capturing is active.
  if (removed_item.get())
    removed_item->Reset();
}

void WebRtcLocalAudioTrack::AddSink(PeerConnectionAudioSink* sink) {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
  base::AutoLock auto_lock(lock_);

  // Verify that |sink| is not already added to the list.
  DCHECK(!sinks_.Contains(
      MediaStreamAudioTrackSink::WrapsPeerConnectionSink(sink)));

  // Create (and add to the list) a new MediaStreamAudioTrackSink
  // which owns the |sink| and delegates all calls to the
  // PeerConnectionAudioSink interface. It will be tagged in the list, so
  // we remember to call OnSetFormat() on the new sink.
  scoped_refptr<MediaStreamAudioTrackSink> sink_owner(
      new PeerConnectionAudioSinkOwner(sink));
  sinks_.AddAndTag(sink_owner);
}

void WebRtcLocalAudioTrack::RemoveSink(PeerConnectionAudioSink* sink) {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::RemoveSink()";

  base::AutoLock auto_lock(lock_);

  scoped_refptr<MediaStreamAudioTrackSink> removed_item = sinks_.Remove(
      MediaStreamAudioTrackSink::WrapsPeerConnectionSink(sink));
  // Clear the delegate to ensure that no more capture callbacks will
  // be sent to this sink. Also avoids a possible crash which can happen
  // if this method is called while capturing is active.
  if (removed_item.get())
    removed_item->Reset();
}
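
// Start() connects the track to its source: a WebAudio source is told to start
// pushing data into this track, while a microphone capturer has the track
// added as one of its consumers. All currently registered sinks then move to
// the ReadyStateLive state.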
void WebRtcLocalAudioTrack::Start() {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::Start()";
  if (webaudio_source_.get()) {
    // If the track is hooked up to WebAudio, do NOT add the track to the
    // capturer as its sink; otherwise two streams on different clocks would be
    // pushed through the same track.
    webaudio_source_->Start(this, capturer_.get());
  } else if (capturer_.get()) {
    capturer_->AddTrack(this);
  }

  SinkList::ItemList sinks;
  {
    base::AutoLock auto_lock(lock_);
    sinks = sinks_.Items();
  }
  for (SinkList::ItemList::const_iterator it = sinks.begin();
       it != sinks.end();
       ++it) {
    (*it)->OnReadyStateChanged(blink::WebMediaStreamSource::ReadyStateLive);
  }
}

void WebRtcLocalAudioTrack::Stop() {
  DCHECK(main_render_thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::Stop()";
  if (!capturer_.get() && !webaudio_source_.get())
    return;

  if (webaudio_source_.get()) {
    // Call Stop() on the |webaudio_source_| explicitly so that
    // |webaudio_source_| won't push more data to the track anymore.
    // Also note that the track is not registered as a sink to the |capturer_|
    // in this case, so there is no need to call RemoveTrack().
    webaudio_source_->Stop();
  } else {
    // It is necessary to call RemoveTrack on the |capturer_| to avoid getting
    // audio callbacks after Stop().
    capturer_->RemoveTrack(this);
  }

  // Protect the pointers using the lock when accessing |sinks_| and
  // setting the |capturer_| to NULL.
  SinkList::ItemList sinks;
  {
    base::AutoLock auto_lock(lock_);
    sinks = sinks_.Items();
    sinks_.Clear();
    webaudio_source_ = NULL;
    capturer_ = NULL;
  }

  for (SinkList::ItemList::const_iterator it = sinks.begin();
       it != sinks.end();
       ++it) {
    (*it)->OnReadyStateChanged(blink::WebMediaStreamSource::ReadyStateEnded);
    (*it)->Reset();
  }
}

}  // namespace content