media/audio/mac/audio_synchronized_mac.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/mac/audio_synchronized_mac.h"

#include <CoreServices/CoreServices.h>
#include <algorithm>

#include "base/basictypes.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/channel_mixer.h"

namespace media {

static const int kHardwareBufferSize = 128;
static const int kFifoSize = 16384;

// TODO(crogers): handle the non-stereo case.
static const int kChannels = 2;

// This value was determined empirically for minimum latency while still
// guarding against FIFO under-runs.
static const int kBaseTargetFifoFrames = 256 + 64;

// If the input and output sample-rates don't match, then we need to maintain
// an additional safety margin due to the callback timing jitter and the
// varispeed buffering. This value was empirically tuned.
static const int kAdditionalTargetFifoFrames = 128;
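
// For a rough sense of scale (added note, not from the original source): at a
// 48 kHz device rate, the base target of 256 + 64 = 320 frames corresponds to
// about 320 / 48000 ≈ 6.7 ms of buffered audio, and the additional 128-frame
// margin used for mismatched rates brings it to roughly 448 / 48000 ≈ 9.3 ms.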

static void ZeroBufferList(AudioBufferList* buffer_list) {
  for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
    memset(buffer_list->mBuffers[i].mData,
           0,
           buffer_list->mBuffers[i].mDataByteSize);
}

static void WrapBufferList(AudioBufferList* buffer_list,
                           AudioBus* bus,
                           int frames) {
  DCHECK(buffer_list);
  DCHECK(bus);
  int channels = bus->channels();
  int buffer_list_channels = buffer_list->mNumberBuffers;

  // Copy pointers from the AudioBufferList.
  int source_idx = 0;
  for (int i = 0; i < channels; ++i) {
    bus->SetChannelData(
        i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));

    // It's ok to pass in a |buffer_list| with fewer channels, in which
    // case we just duplicate the last channel.
    if (source_idx < buffer_list_channels - 1)
      ++source_idx;
  }

  // Finally set the actual length.
  bus->set_frames(frames);
}

AudioSynchronizedStream::AudioSynchronizedStream(
    AudioManagerMac* manager,
    const AudioParameters& params,
    AudioDeviceID input_id,
    AudioDeviceID output_id)
    : manager_(manager),
      params_(params),
      input_sample_rate_(0),
      output_sample_rate_(0),
      input_id_(input_id),
      output_id_(output_id),
      input_buffer_list_(NULL),
      fifo_(kChannels, kFifoSize),
      target_fifo_frames_(kBaseTargetFifoFrames),
      average_delta_(0.0),
      fifo_rate_compensation_(1.0),
      input_unit_(0),
      varispeed_unit_(0),
      output_unit_(0),
      first_input_time_(-1),
      is_running_(false),
      hardware_buffer_size_(kHardwareBufferSize),
      channels_(kChannels) {
  VLOG(1) << "AudioSynchronizedStream::AudioSynchronizedStream()";
}

AudioSynchronizedStream::~AudioSynchronizedStream() {
  DCHECK(!input_unit_);
  DCHECK(!output_unit_);
  DCHECK(!varispeed_unit_);
}

bool AudioSynchronizedStream::Open() {
  if (params_.channels() != kChannels) {
    LOG(ERROR) << "Only stereo output is currently supported.";
    return false;
  }

  // Create the input, output, and varispeed AudioUnits.
  OSStatus result = CreateAudioUnits();
  if (result != noErr) {
    LOG(ERROR) << "Cannot create AudioUnits.";
    return false;
  }

  result = SetupInput(input_id_);
  if (result != noErr) {
    LOG(ERROR) << "Error configuring input AudioUnit.";
    return false;
  }

  result = SetupOutput(output_id_);
  if (result != noErr) {
    LOG(ERROR) << "Error configuring output AudioUnit.";
    return false;
  }

  result = SetupCallbacks();
  if (result != noErr) {
    LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
    return false;
  }

  result = SetupStreamFormats();
  if (result != noErr) {
    LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
    return false;
  }

  AllocateInputData();

  // Final initialization of the AudioUnits.
  result = AudioUnitInitialize(input_unit_);
  if (result != noErr) {
    LOG(ERROR) << "Error initializing input AudioUnit.";
    return false;
  }

  result = AudioUnitInitialize(output_unit_);
  if (result != noErr) {
    LOG(ERROR) << "Error initializing output AudioUnit.";
    return false;
  }

  result = AudioUnitInitialize(varispeed_unit_);
  if (result != noErr) {
    LOG(ERROR) << "Error initializing varispeed AudioUnit.";
    return false;
  }

  if (input_sample_rate_ != output_sample_rate_) {
    // Add an extra safety margin.
    target_fifo_frames_ += kAdditionalTargetFifoFrames;
  }

  // Buffer initial silence corresponding to the target I/O buffering.
  fifo_.Clear();
  scoped_ptr<AudioBus> silence =
      AudioBus::Create(channels_, target_fifo_frames_);
  silence->Zero();
  fifo_.Push(silence.get());

  return true;
}
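
// Typical lifecycle (illustrative sketch, not from the original source; the
// stream itself is created via AudioManagerMac):
//
//   AudioSynchronizedStream* stream = ...;  // obtained from the audio manager
//   if (stream->Open()) {
//     stream->Start(source_callback);       // starts the input/output units
//     ...
//     stream->Stop();
//   }
//   stream->Close();                        // may delete |stream|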

void AudioSynchronizedStream::Close() {
  DCHECK(!is_running_);

  if (input_buffer_list_) {
    free(input_buffer_list_);
    input_buffer_list_ = 0;
    input_bus_.reset(NULL);
    wrapper_bus_.reset(NULL);
  }

  if (input_unit_) {
    AudioUnitUninitialize(input_unit_);
    CloseComponent(input_unit_);
  }

  if (output_unit_) {
    AudioUnitUninitialize(output_unit_);
    CloseComponent(output_unit_);
  }

  if (varispeed_unit_) {
    AudioUnitUninitialize(varispeed_unit_);
    CloseComponent(varispeed_unit_);
  }

  input_unit_ = NULL;
  output_unit_ = NULL;
  varispeed_unit_ = NULL;

  // Inform the audio manager that we have been closed. This can cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}

void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
  DCHECK(callback);
  DCHECK(input_unit_);
  DCHECK(output_unit_);
  DCHECK(varispeed_unit_);

  if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
    return;

  source_ = callback;

  // Reset state variables each time we Start().
  fifo_rate_compensation_ = 1.0;
  average_delta_ = 0.0;

  OSStatus result = noErr;

  if (!is_running_) {
    first_input_time_ = -1;

    result = AudioOutputUnitStart(input_unit_);
    OSSTATUS_DCHECK(result == noErr, result);

    if (result == noErr) {
      result = AudioOutputUnitStart(output_unit_);
      OSSTATUS_DCHECK(result == noErr, result);
    }
  }

  is_running_ = true;
}

void AudioSynchronizedStream::Stop() {
  OSStatus result = noErr;
  if (is_running_) {
    result = AudioOutputUnitStop(input_unit_);
    OSSTATUS_DCHECK(result == noErr, result);

    if (result == noErr) {
      result = AudioOutputUnitStop(output_unit_);
      OSSTATUS_DCHECK(result == noErr, result);
    }
  }

  if (result == noErr)
    is_running_ = false;
}

bool AudioSynchronizedStream::IsRunning() {
  return is_running_;
}

// TODO(crogers): implement - or remove from AudioOutputStream.
void AudioSynchronizedStream::SetVolume(double volume) {}
void AudioSynchronizedStream::GetVolume(double* volume) {}

OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
    AudioDeviceID output_id) {
  OSStatus result = noErr;

  // Get the default output device if the device is unknown.
  if (output_id == kAudioDeviceUnknown) {
    AudioObjectPropertyAddress pa;
    pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
    pa.mScope = kAudioObjectPropertyScopeGlobal;
    pa.mElement = kAudioObjectPropertyElementMaster;
    UInt32 size = sizeof(output_id);

    result = AudioObjectGetPropertyData(
        kAudioObjectSystemObject,
        &pa,
        0,
        0,
        &size,
        &output_id);

    OSSTATUS_DCHECK(result == noErr, result);
    if (result != noErr)
      return result;
  }

  // Set the render frame size.
  UInt32 frame_size = hardware_buffer_size_;
  AudioObjectPropertyAddress pa;
  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
  pa.mScope = kAudioDevicePropertyScopeInput;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectSetPropertyData(
      output_id,
      &pa,
      0,
      0,
      sizeof(frame_size),
      &frame_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  output_info_.Initialize(output_id, false);

  // Set the current device of the Default Output Unit.
  result = AudioUnitSetProperty(
      output_unit_,
      kAudioOutputUnitProperty_CurrentDevice,
      kAudioUnitScope_Global,
      0,
      &output_info_.id_,
      sizeof(output_info_.id_));

  OSSTATUS_DCHECK(result == noErr, result);
  return result;
}

OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
    AudioDeviceID input_id) {
  OSStatus result = noErr;

  // Get the default input device if the device is unknown.
  if (input_id == kAudioDeviceUnknown) {
    AudioObjectPropertyAddress pa;
    pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
    pa.mScope = kAudioObjectPropertyScopeGlobal;
    pa.mElement = kAudioObjectPropertyElementMaster;
    UInt32 size = sizeof(input_id);

    result = AudioObjectGetPropertyData(
        kAudioObjectSystemObject,
        &pa,
        0,
        0,
        &size,
        &input_id);

    OSSTATUS_DCHECK(result == noErr, result);
    if (result != noErr)
      return result;
  }

  // Set the render frame size.
  UInt32 frame_size = hardware_buffer_size_;
  AudioObjectPropertyAddress pa;
  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
  pa.mScope = kAudioDevicePropertyScopeInput;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectSetPropertyData(
      input_id,
      &pa,
      0,
      0,
      sizeof(frame_size),
      &frame_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  input_info_.Initialize(input_id, true);

  // Set the current device of the AUHAL.
  // This should be done only after I/O has been enabled on the AUHAL.
  result = AudioUnitSetProperty(
      input_unit_,
      kAudioOutputUnitProperty_CurrentDevice,
      kAudioUnitScope_Global,
      0,
      &input_info_.id_,
      sizeof(input_info_.id_));

  OSSTATUS_DCHECK(result == noErr, result);
  return result;
}

OSStatus AudioSynchronizedStream::CreateAudioUnits() {
  // Q: Why do we need a varispeed unit?
  // A: If the input device and the output device are running at
  // different sample rates and/or on different clocks, we will need
  // to compensate to avoid a pitch change and
  // to avoid buffer under-runs and over-runs.
  ComponentDescription varispeed_desc;
  varispeed_desc.componentType = kAudioUnitType_FormatConverter;
  varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
  varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  varispeed_desc.componentFlags = 0;
  varispeed_desc.componentFlagsMask = 0;

  Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
  if (varispeed_comp == NULL)
    return -1;

  OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Open the input AudioUnit.
  ComponentDescription input_desc;
  input_desc.componentType = kAudioUnitType_Output;
  input_desc.componentSubType = kAudioUnitSubType_HALOutput;
  input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  input_desc.componentFlags = 0;
  input_desc.componentFlagsMask = 0;

  Component input_comp = FindNextComponent(NULL, &input_desc);
  if (input_comp == NULL)
    return -1;

  result = OpenAComponent(input_comp, &input_unit_);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Open the output AudioUnit.
  ComponentDescription output_desc;
  output_desc.componentType = kAudioUnitType_Output;
  output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  output_desc.componentFlags = 0;
  output_desc.componentFlagsMask = 0;

  Component output_comp = FindNextComponent(NULL, &output_desc);
  if (output_comp == NULL)
    return -1;

  result = OpenAComponent(output_comp, &output_unit_);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  return noErr;
}
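
// How the three units above are wired together (summary added for clarity,
// not a comment from the original source):
//
//   input AUHAL --InputProc--> fifo_ --VarispeedProc--> varispeed_unit_
//   varispeed_unit_ --rendered in OutputProc--> output_unit_ --> device
//
// The varispeed unit resamples slightly faster or slower to keep fifo_ near
// |target_fifo_frames_|, absorbing clock drift between the two devices.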

OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
  // The AUHAL used for input needs to be initialized
  // before anything is done to it.
  OSStatus result = AudioUnitInitialize(input_unit_);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // We must enable the Audio Unit (AUHAL) for input and disable output
  // BEFORE setting the AUHAL's current device.
  result = EnableIO();
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  result = SetInputDeviceAsCurrent(input_id);
  OSSTATUS_DCHECK(result == noErr, result);

  return result;
}

OSStatus AudioSynchronizedStream::EnableIO() {
  // Enable input on the AUHAL.
  UInt32 enable_io = 1;
  OSStatus result = AudioUnitSetProperty(
      input_unit_,
      kAudioOutputUnitProperty_EnableIO,
      kAudioUnitScope_Input,
      1,  // input element
      &enable_io,
      sizeof(enable_io));

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Disable output on the AUHAL.
  enable_io = 0;
  result = AudioUnitSetProperty(
      input_unit_,
      kAudioOutputUnitProperty_EnableIO,
      kAudioUnitScope_Output,
      0,  // output element
      &enable_io,
      sizeof(enable_io));

  OSSTATUS_DCHECK(result == noErr, result);
  return result;
}
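
// Note on EnableIO() above (added for clarity, not in the original source):
// on the AUHAL, element 1 is the input bus and element 0 is the output bus,
// which is why the two property calls address different elements.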

OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
  OSStatus result = noErr;

  result = SetOutputDeviceAsCurrent(output_id);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Tell the output unit not to reset timestamps.
  // Otherwise sample rate changes will cause sync loss.
  UInt32 start_at_zero = 0;
  result = AudioUnitSetProperty(
      output_unit_,
      kAudioOutputUnitProperty_StartTimestampsAtZero,
      kAudioUnitScope_Global,
      0,
      &start_at_zero,
      sizeof(start_at_zero));

  OSSTATUS_DCHECK(result == noErr, result);

  return result;
}

OSStatus AudioSynchronizedStream::SetupCallbacks() {
  // Set the input callback.
  AURenderCallbackStruct callback;
  callback.inputProc = InputProc;
  callback.inputProcRefCon = this;
  OSStatus result = AudioUnitSetProperty(
      input_unit_,
      kAudioOutputUnitProperty_SetInputCallback,
      kAudioUnitScope_Global,
      0,
      &callback,
      sizeof(callback));

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Set the output callback.
  callback.inputProc = OutputProc;
  callback.inputProcRefCon = this;
  result = AudioUnitSetProperty(
      output_unit_,
      kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input,
      0,
      &callback,
      sizeof(callback));

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Set the varispeed callback.
  callback.inputProc = VarispeedProc;
  callback.inputProcRefCon = this;
  result = AudioUnitSetProperty(
      varispeed_unit_,
      kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input,
      0,
      &callback,
      sizeof(callback));

  OSSTATUS_DCHECK(result == noErr, result);

  return result;
}

OSStatus AudioSynchronizedStream::SetupStreamFormats() {
  AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;

  // Get the hardware-side stream format of the input device
  // (input scope, input element of the AUHAL).
  UInt32 property_size = sizeof(asbd_dev1_in);
  OSStatus result = AudioUnitGetProperty(
      input_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      1,
      &asbd_dev1_in,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Get the client-side stream format of the input AUHAL
  // (output scope, input element).
  property_size = sizeof(asbd);
  result = AudioUnitGetProperty(
      input_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      1,
      &asbd,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Get the hardware-side stream format of the output device.
  property_size = sizeof(asbd_dev2_out);
  result = AudioUnitGetProperty(
      output_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      0,
      &asbd_dev2_out,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Set the format of all the AUs to the input/output devices' channel count.
  // For a simple case, this is the lower of the channel counts of the input
  // device and the output device.
  asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
                                    asbd_dev2_out.mChannelsPerFrame);

  // We must get the sample rate of the input device and set it on the
  // stream format of the AUHAL.
  Float64 rate = 0;
  property_size = sizeof(rate);

  AudioObjectPropertyAddress pa;
  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
  pa.mScope = kAudioObjectPropertyScopeWildcard;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectGetPropertyData(
      input_info_.id_,
      &pa,
      0,
      0,
      &property_size,
      &rate);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  input_sample_rate_ = rate;

  asbd.mSampleRate = rate;
  property_size = sizeof(asbd);

  // Set the new formats on the AUs...
  result = AudioUnitSetProperty(
      input_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      1,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  result = AudioUnitSetProperty(
      varispeed_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      0,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Set the correct sample rate for the output device,
  // but keep the channel count the same.
  property_size = sizeof(rate);

  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
  pa.mScope = kAudioObjectPropertyScopeWildcard;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectGetPropertyData(
      output_info_.id_,
      &pa,
      0,
      0,
      &property_size,
      &rate);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  output_sample_rate_ = rate;

  // The requested sample-rate must match the hardware sample-rate.
  if (output_sample_rate_ != params_.sample_rate()) {
    LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
               << " must match the hardware sample-rate: "
               << output_sample_rate_;
    return kAudioDeviceUnsupportedFormatError;
  }

  asbd.mSampleRate = rate;
  property_size = sizeof(asbd);

  // Set the new audio stream formats for the rest of the AUs...
  result = AudioUnitSetProperty(
      varispeed_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      0,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  result = AudioUnitSetProperty(
      output_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      0,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  return result;
}

void AudioSynchronizedStream::AllocateInputData() {
  // Get the native number of input channels that the hardware supports.
  int hardware_channels = 0;
  bool got_hardware_channels = AudioManagerMac::GetDeviceChannels(
      input_id_, kAudioDevicePropertyScopeInput, &hardware_channels);
  if (!got_hardware_channels || hardware_channels > 2) {
    // Only mono and stereo are supported on the input side. If we fail to
    // query the native channel count, or it is greater than 2, open the
    // device in stereo mode.
    hardware_channels = 2;
  }

  // Allocate storage for the AudioBufferList used for the
  // input data from the input AudioUnit.
  // We allocate enough space for one AudioBuffer per channel.
  size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
      (sizeof(AudioBuffer) * hardware_channels);

  input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
  input_buffer_list_->mNumberBuffers = hardware_channels;

  input_bus_ = AudioBus::Create(hardware_channels, hardware_buffer_size_);
  wrapper_bus_ = AudioBus::CreateWrapper(channels_);
  if (hardware_channels != params_.input_channels()) {
    ChannelLayout hardware_channel_layout =
        GuessChannelLayout(hardware_channels);
    ChannelLayout requested_channel_layout =
        GuessChannelLayout(params_.input_channels());
    channel_mixer_.reset(new ChannelMixer(hardware_channel_layout,
                                          requested_channel_layout));
    mixer_bus_ = AudioBus::Create(params_.input_channels(),
                                  hardware_buffer_size_);
  }

  // Allocate buffers for the AudioBufferList.
  UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
  for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
    input_buffer_list_->mBuffers[i].mNumberChannels = 1;
    input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
    input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
  }
}
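
// Note on AllocateInputData() above (added for clarity, not in the original
// source): each AudioBuffer in |input_buffer_list_| points at a channel of
// |input_bus_|, so AudioUnitRender() in HandleInputCallback() writes the
// captured samples directly into |input_bus_| with no extra copy.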

OSStatus AudioSynchronizedStream::HandleInputCallback(
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");

  if (first_input_time_ < 0.0)
    first_input_time_ = time_stamp->mSampleTime;

  // Get the new audio input data.
  OSStatus result = AudioUnitRender(
      input_unit_,
      io_action_flags,
      time_stamp,
      bus_number,
      number_of_frames,
      input_buffer_list_);

  // TODO(xians): Add back the DCHECK once synchronized IO supports all
  // combinations of input and output parameters. See http://issue/246521.
  if (result != noErr)
    return result;

  // Buffer input into the FIFO.
  int available_frames = fifo_.max_frames() - fifo_.frames();
  if (input_bus_->frames() <= available_frames) {
    if (channel_mixer_) {
      channel_mixer_->Transform(input_bus_.get(), mixer_bus_.get());
      fifo_.Push(mixer_bus_.get());
    } else {
      fifo_.Push(input_bus_.get());
    }
  }

  return result;
}

OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  // Create a wrapper bus on the AudioBufferList.
  WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);

  if (fifo_.frames() < static_cast<int>(number_of_frames)) {
    // We don't DCHECK here, since this is a possible run-time condition
    // if the machine is bogged down.
    wrapper_bus_->Zero();
    return noErr;
  }

  // Read from the FIFO to feed the varispeed.
  fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);

  return noErr;
}

OSStatus AudioSynchronizedStream::HandleOutputCallback(
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  // If the input callback hasn't run yet, or the sample-rate suddenly
  // changed, render silence.
  if (first_input_time_ < 0.0 ||
      static_cast<int>(number_of_frames) != params_.frames_per_buffer()) {
    ZeroBufferList(io_data);
    return noErr;
  }

  // Use the varispeed playback rate to offset small discrepancies
  // in hardware clocks, and also any differences in sample-rate
  // between input and output devices.

  // Calculate a varispeed rate scalar factor to compensate for drift between
  // input and output. We use the actual number of frames still in the FIFO
  // compared with the ideal value of |target_fifo_frames_|.
  int delta = fifo_.frames() - target_fifo_frames_;

  // Average |delta| because it can jitter back and forth quite frequently
  // by +/- the hardware buffer-size *if* the input and output callbacks are
  // happening at almost exactly the same time. Also, if the input and output
  // sample-rates are different then |delta| will jitter quite a bit due to
  // the rate conversion happening in the varispeed, plus the jittering of
  // the callbacks. The average value is what's important here.
  average_delta_ += (delta - average_delta_) * 0.1;

  // Compute a rate compensation which always attracts us back to the
  // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
  const double kCorrectionTimeSeconds = 0.1;
  double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
  fifo_rate_compensation_ =
      (correction_time_frames + average_delta_) / correction_time_frames;
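
  // Worked example (added for illustration, not part of the original code):
  // at a 48000 Hz output rate, correction_time_frames is 4800. If the FIFO is
  // averaging ~48 frames above target (average_delta_ == 48), the compensation
  // becomes (4800 + 48) / 4800 == 1.01, i.e. the varispeed plays ~1% faster
  // until the backlog drains back toward |target_fifo_frames_|.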

  // Adjust for FIFO drift.
  OSStatus result = AudioUnitSetParameter(
      varispeed_unit_,
      kVarispeedParam_PlaybackRate,
      kAudioUnitScope_Global,
      0,
      fifo_rate_compensation_,
      0);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Render to the output using the varispeed.
  result = AudioUnitRender(
      varispeed_unit_,
      io_action_flags,
      time_stamp,
      0,
      number_of_frames,
      io_data);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Create a wrapper bus on the AudioBufferList.
  WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);

  // Process in-place!
  source_->OnMoreIOData(wrapper_bus_.get(),
                        wrapper_bus_.get(),
                        AudioBuffersState(0, 0));

  return noErr;
}

OSStatus AudioSynchronizedStream::InputProc(
    void* user_data,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  AudioSynchronizedStream* stream =
      static_cast<AudioSynchronizedStream*>(user_data);
  DCHECK(stream);

  return stream->HandleInputCallback(
      io_action_flags,
      time_stamp,
      bus_number,
      number_of_frames,
      io_data);
}

OSStatus AudioSynchronizedStream::VarispeedProc(
    void* user_data,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  AudioSynchronizedStream* stream =
      static_cast<AudioSynchronizedStream*>(user_data);
  DCHECK(stream);

  return stream->HandleVarispeedCallback(
      io_action_flags,
      time_stamp,
      bus_number,
      number_of_frames,
      io_data);
}

OSStatus AudioSynchronizedStream::OutputProc(
    void* user_data,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 number_of_frames,
    AudioBufferList* io_data) {
  AudioSynchronizedStream* stream =
      static_cast<AudioSynchronizedStream*>(user_data);
  DCHECK(stream);

  return stream->HandleOutputCallback(
      io_action_flags,
      time_stamp,
      bus_number,
      number_of_frames,
      io_data);
}

void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
    AudioDeviceID id, bool is_input) {
  id_ = id;
  is_input_ = is_input;
  if (id_ == kAudioDeviceUnknown)
    return;

  UInt32 property_size = sizeof(buffer_size_frames_);

  AudioObjectPropertyAddress pa;
  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
  pa.mScope = kAudioObjectPropertyScopeWildcard;
  pa.mElement = kAudioObjectPropertyElementMaster;
  OSStatus result = AudioObjectGetPropertyData(
      id_,
      &pa,
      0,
      0,
      &property_size,
      &buffer_size_frames_);

  OSSTATUS_DCHECK(result == noErr, result);
}

}  // namespace media