1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "media/audio/mac/audio_synchronized_mac.h"
7 #include <CoreServices/CoreServices.h>
10 #include "base/basictypes.h"
11 #include "base/debug/trace_event.h"
12 #include "base/logging.h"
13 #include "base/mac/mac_logging.h"
14 #include "media/audio/mac/audio_manager_mac.h"
15 #include "media/base/channel_mixer.h"
// Preferred hardware I/O buffer size in frames; small for low latency.
static const int kHardwareBufferSize = 128;

// Capacity (in frames) of the FIFO that buffers input before output.
static const int kFifoSize = 16384;

// TODO(crogers): handle the non-stereo case.
static const int kChannels = 2;

// This value was determined empirically for minimum latency while still
// guarding against FIFO under-runs.
static const int kBaseTargetFifoFrames = 256 + 64;

// If the input and output sample-rate don't match, then we need to maintain
// an additional safety margin due to the callback timing jitter and the
// varispeed buffering. This value was empirically tuned.
static const int kAdditionalTargetFifoFrames = 128;
34 static void ZeroBufferList(AudioBufferList
* buffer_list
) {
35 for (size_t i
= 0; i
< buffer_list
->mNumberBuffers
; ++i
)
36 memset(buffer_list
->mBuffers
[i
].mData
,
38 buffer_list
->mBuffers
[i
].mDataByteSize
);
41 static void WrapBufferList(AudioBufferList
* buffer_list
,
46 int channels
= bus
->channels();
47 int buffer_list_channels
= buffer_list
->mNumberBuffers
;
49 // Copy pointers from AudioBufferList.
51 for (int i
= 0; i
< channels
; ++i
) {
53 i
, static_cast<float*>(buffer_list
->mBuffers
[source_idx
].mData
));
55 // It's ok to pass in a |buffer_list| with fewer channels, in which
56 // case we just duplicate the last channel.
57 if (source_idx
< buffer_list_channels
- 1)
61 // Finally set the actual length.
62 bus
->set_frames(frames
);
65 AudioSynchronizedStream::AudioSynchronizedStream(
66 AudioManagerMac
* manager
,
67 const AudioParameters
& params
,
68 AudioDeviceID input_id
,
69 AudioDeviceID output_id
)
72 input_sample_rate_(0),
73 output_sample_rate_(0),
75 output_id_(output_id
),
76 input_buffer_list_(NULL
),
77 fifo_(kChannels
, kFifoSize
),
78 target_fifo_frames_(kBaseTargetFifoFrames
),
80 fifo_rate_compensation_(1.0),
84 first_input_time_(-1),
86 hardware_buffer_size_(kHardwareBufferSize
),
87 channels_(kChannels
) {
88 VLOG(1) << "AudioSynchronizedStream::AudioSynchronizedStream()";
91 AudioSynchronizedStream::~AudioSynchronizedStream() {
93 DCHECK(!output_unit_
);
94 DCHECK(!varispeed_unit_
);
97 bool AudioSynchronizedStream::Open() {
98 if (params_
.channels() != kChannels
) {
99 LOG(ERROR
) << "Only stereo output is currently supported.";
103 // Create the input, output, and varispeed AudioUnits.
104 OSStatus result
= CreateAudioUnits();
105 if (result
!= noErr
) {
106 LOG(ERROR
) << "Cannot create AudioUnits.";
110 result
= SetupInput(input_id_
);
111 if (result
!= noErr
) {
112 LOG(ERROR
) << "Error configuring input AudioUnit.";
116 result
= SetupOutput(output_id_
);
117 if (result
!= noErr
) {
118 LOG(ERROR
) << "Error configuring output AudioUnit.";
122 result
= SetupCallbacks();
123 if (result
!= noErr
) {
124 LOG(ERROR
) << "Error setting up callbacks on AudioUnits.";
128 result
= SetupStreamFormats();
129 if (result
!= noErr
) {
130 LOG(ERROR
) << "Error configuring stream formats on AudioUnits.";
136 // Final initialization of the AudioUnits.
137 result
= AudioUnitInitialize(input_unit_
);
138 if (result
!= noErr
) {
139 LOG(ERROR
) << "Error initializing input AudioUnit.";
143 result
= AudioUnitInitialize(output_unit_
);
144 if (result
!= noErr
) {
145 LOG(ERROR
) << "Error initializing output AudioUnit.";
149 result
= AudioUnitInitialize(varispeed_unit_
);
150 if (result
!= noErr
) {
151 LOG(ERROR
) << "Error initializing varispeed AudioUnit.";
155 if (input_sample_rate_
!= output_sample_rate_
) {
156 // Add extra safety margin.
157 target_fifo_frames_
+= kAdditionalTargetFifoFrames
;
160 // Buffer initial silence corresponding to target I/O buffering.
162 scoped_ptr
<AudioBus
> silence
=
163 AudioBus::Create(channels_
, target_fifo_frames_
);
165 fifo_
.Push(silence
.get());
170 void AudioSynchronizedStream::Close() {
171 DCHECK(!is_running_
);
173 if (input_buffer_list_
) {
174 free(input_buffer_list_
);
175 input_buffer_list_
= 0;
176 input_bus_
.reset(NULL
);
177 wrapper_bus_
.reset(NULL
);
181 AudioUnitUninitialize(input_unit_
);
182 CloseComponent(input_unit_
);
186 AudioUnitUninitialize(output_unit_
);
187 CloseComponent(output_unit_
);
190 if (varispeed_unit_
) {
191 AudioUnitUninitialize(varispeed_unit_
);
192 CloseComponent(varispeed_unit_
);
197 varispeed_unit_
= NULL
;
199 // Inform the audio manager that we have been closed. This can cause our
201 manager_
->ReleaseOutputStream(this);
204 void AudioSynchronizedStream::Start(AudioSourceCallback
* callback
) {
207 DCHECK(output_unit_
);
208 DCHECK(varispeed_unit_
);
210 if (is_running_
|| !input_unit_
|| !output_unit_
|| !varispeed_unit_
)
215 // Reset state variables each time we Start().
216 fifo_rate_compensation_
= 1.0;
217 average_delta_
= 0.0;
219 OSStatus result
= noErr
;
222 first_input_time_
= -1;
224 result
= AudioOutputUnitStart(input_unit_
);
225 OSSTATUS_DCHECK(result
== noErr
, result
);
227 if (result
== noErr
) {
228 result
= AudioOutputUnitStart(output_unit_
);
229 OSSTATUS_DCHECK(result
== noErr
, result
);
236 void AudioSynchronizedStream::Stop() {
237 OSStatus result
= noErr
;
239 result
= AudioOutputUnitStop(input_unit_
);
240 OSSTATUS_DCHECK(result
== noErr
, result
);
242 if (result
== noErr
) {
243 result
= AudioOutputUnitStop(output_unit_
);
244 OSSTATUS_DCHECK(result
== noErr
, result
);
252 bool AudioSynchronizedStream::IsRunning() {
256 // TODO(crogers): implement - or remove from AudioOutputStream.
257 void AudioSynchronizedStream::SetVolume(double volume
) {}
258 void AudioSynchronizedStream::GetVolume(double* volume
) {}
260 OSStatus
AudioSynchronizedStream::SetOutputDeviceAsCurrent(
261 AudioDeviceID output_id
) {
262 OSStatus result
= noErr
;
264 // Get the default output device if device is unknown.
265 if (output_id
== kAudioDeviceUnknown
) {
266 AudioObjectPropertyAddress pa
;
267 pa
.mSelector
= kAudioHardwarePropertyDefaultOutputDevice
;
268 pa
.mScope
= kAudioObjectPropertyScopeGlobal
;
269 pa
.mElement
= kAudioObjectPropertyElementMaster
;
270 UInt32 size
= sizeof(output_id
);
272 result
= AudioObjectGetPropertyData(
273 kAudioObjectSystemObject
,
280 OSSTATUS_DCHECK(result
== noErr
, result
);
285 // Set the render frame size.
286 UInt32 frame_size
= hardware_buffer_size_
;
287 AudioObjectPropertyAddress pa
;
288 pa
.mSelector
= kAudioDevicePropertyBufferFrameSize
;
289 pa
.mScope
= kAudioDevicePropertyScopeInput
;
290 pa
.mElement
= kAudioObjectPropertyElementMaster
;
291 result
= AudioObjectSetPropertyData(
299 OSSTATUS_DCHECK(result
== noErr
, result
);
303 output_info_
.Initialize(output_id
, false);
305 // Set the Current Device to the Default Output Unit.
306 result
= AudioUnitSetProperty(
308 kAudioOutputUnitProperty_CurrentDevice
,
309 kAudioUnitScope_Global
,
312 sizeof(output_info_
.id_
));
314 OSSTATUS_DCHECK(result
== noErr
, result
);
318 OSStatus
AudioSynchronizedStream::SetInputDeviceAsCurrent(
319 AudioDeviceID input_id
) {
320 OSStatus result
= noErr
;
322 // Get the default input device if device is unknown.
323 if (input_id
== kAudioDeviceUnknown
) {
324 AudioObjectPropertyAddress pa
;
325 pa
.mSelector
= kAudioHardwarePropertyDefaultInputDevice
;
326 pa
.mScope
= kAudioObjectPropertyScopeGlobal
;
327 pa
.mElement
= kAudioObjectPropertyElementMaster
;
328 UInt32 size
= sizeof(input_id
);
330 result
= AudioObjectGetPropertyData(
331 kAudioObjectSystemObject
,
338 OSSTATUS_DCHECK(result
== noErr
, result
);
343 // Set the render frame size.
344 UInt32 frame_size
= hardware_buffer_size_
;
345 AudioObjectPropertyAddress pa
;
346 pa
.mSelector
= kAudioDevicePropertyBufferFrameSize
;
347 pa
.mScope
= kAudioDevicePropertyScopeInput
;
348 pa
.mElement
= kAudioObjectPropertyElementMaster
;
349 result
= AudioObjectSetPropertyData(
357 OSSTATUS_DCHECK(result
== noErr
, result
);
361 input_info_
.Initialize(input_id
, true);
363 // Set the Current Device to the AUHAL.
364 // This should be done only after I/O has been enabled on the AUHAL.
365 result
= AudioUnitSetProperty(
367 kAudioOutputUnitProperty_CurrentDevice
,
368 kAudioUnitScope_Global
,
371 sizeof(input_info_
.id_
));
373 OSSTATUS_DCHECK(result
== noErr
, result
);
377 OSStatus
AudioSynchronizedStream::CreateAudioUnits() {
378 // Q: Why do we need a varispeed unit?
379 // A: If the input device and the output device are running at
380 // different sample rates and/or on different clocks, we will need
381 // to compensate to avoid a pitch change and
382 // to avoid buffer under and over runs.
383 ComponentDescription varispeed_desc
;
384 varispeed_desc
.componentType
= kAudioUnitType_FormatConverter
;
385 varispeed_desc
.componentSubType
= kAudioUnitSubType_Varispeed
;
386 varispeed_desc
.componentManufacturer
= kAudioUnitManufacturer_Apple
;
387 varispeed_desc
.componentFlags
= 0;
388 varispeed_desc
.componentFlagsMask
= 0;
390 Component varispeed_comp
= FindNextComponent(NULL
, &varispeed_desc
);
391 if (varispeed_comp
== NULL
)
394 OSStatus result
= OpenAComponent(varispeed_comp
, &varispeed_unit_
);
395 OSSTATUS_DCHECK(result
== noErr
, result
);
399 // Open input AudioUnit.
400 ComponentDescription input_desc
;
401 input_desc
.componentType
= kAudioUnitType_Output
;
402 input_desc
.componentSubType
= kAudioUnitSubType_HALOutput
;
403 input_desc
.componentManufacturer
= kAudioUnitManufacturer_Apple
;
404 input_desc
.componentFlags
= 0;
405 input_desc
.componentFlagsMask
= 0;
407 Component input_comp
= FindNextComponent(NULL
, &input_desc
);
408 if (input_comp
== NULL
)
411 result
= OpenAComponent(input_comp
, &input_unit_
);
412 OSSTATUS_DCHECK(result
== noErr
, result
);
416 // Open output AudioUnit.
417 ComponentDescription output_desc
;
418 output_desc
.componentType
= kAudioUnitType_Output
;
419 output_desc
.componentSubType
= kAudioUnitSubType_DefaultOutput
;
420 output_desc
.componentManufacturer
= kAudioUnitManufacturer_Apple
;
421 output_desc
.componentFlags
= 0;
422 output_desc
.componentFlagsMask
= 0;
424 Component output_comp
= FindNextComponent(NULL
, &output_desc
);
425 if (output_comp
== NULL
)
428 result
= OpenAComponent(output_comp
, &output_unit_
);
429 OSSTATUS_DCHECK(result
== noErr
, result
);
436 OSStatus
AudioSynchronizedStream::SetupInput(AudioDeviceID input_id
) {
437 // The AUHAL used for input needs to be initialized
438 // before anything is done to it.
439 OSStatus result
= AudioUnitInitialize(input_unit_
);
440 OSSTATUS_DCHECK(result
== noErr
, result
);
444 // We must enable the Audio Unit (AUHAL) for input and disable output
445 // BEFORE setting the AUHAL's current device.
447 OSSTATUS_DCHECK(result
== noErr
, result
);
451 result
= SetInputDeviceAsCurrent(input_id
);
452 OSSTATUS_DCHECK(result
== noErr
, result
);
457 OSStatus
AudioSynchronizedStream::EnableIO() {
458 // Enable input on the AUHAL.
459 UInt32 enable_io
= 1;
460 OSStatus result
= AudioUnitSetProperty(
462 kAudioOutputUnitProperty_EnableIO
,
463 kAudioUnitScope_Input
,
468 OSSTATUS_DCHECK(result
== noErr
, result
);
472 // Disable Output on the AUHAL.
474 result
= AudioUnitSetProperty(
476 kAudioOutputUnitProperty_EnableIO
,
477 kAudioUnitScope_Output
,
482 OSSTATUS_DCHECK(result
== noErr
, result
);
486 OSStatus
AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id
) {
487 OSStatus result
= noErr
;
489 result
= SetOutputDeviceAsCurrent(output_id
);
490 OSSTATUS_DCHECK(result
== noErr
, result
);
494 // Tell the output unit not to reset timestamps.
495 // Otherwise sample rate changes will cause sync loss.
496 UInt32 start_at_zero
= 0;
497 result
= AudioUnitSetProperty(
499 kAudioOutputUnitProperty_StartTimestampsAtZero
,
500 kAudioUnitScope_Global
,
503 sizeof(start_at_zero
));
505 OSSTATUS_DCHECK(result
== noErr
, result
);
510 OSStatus
AudioSynchronizedStream::SetupCallbacks() {
511 // Set the input callback.
512 AURenderCallbackStruct callback
;
513 callback
.inputProc
= InputProc
;
514 callback
.inputProcRefCon
= this;
515 OSStatus result
= AudioUnitSetProperty(
517 kAudioOutputUnitProperty_SetInputCallback
,
518 kAudioUnitScope_Global
,
523 OSSTATUS_DCHECK(result
== noErr
, result
);
527 // Set the output callback.
528 callback
.inputProc
= OutputProc
;
529 callback
.inputProcRefCon
= this;
530 result
= AudioUnitSetProperty(
532 kAudioUnitProperty_SetRenderCallback
,
533 kAudioUnitScope_Input
,
538 OSSTATUS_DCHECK(result
== noErr
, result
);
542 // Set the varispeed callback.
543 callback
.inputProc
= VarispeedProc
;
544 callback
.inputProcRefCon
= this;
545 result
= AudioUnitSetProperty(
547 kAudioUnitProperty_SetRenderCallback
,
548 kAudioUnitScope_Input
,
553 OSSTATUS_DCHECK(result
== noErr
, result
);
558 OSStatus
AudioSynchronizedStream::SetupStreamFormats() {
559 AudioStreamBasicDescription asbd
, asbd_dev1_in
, asbd_dev2_out
;
561 // Get the Stream Format (Output client side).
562 UInt32 property_size
= sizeof(asbd_dev1_in
);
563 OSStatus result
= AudioUnitGetProperty(
565 kAudioUnitProperty_StreamFormat
,
566 kAudioUnitScope_Input
,
571 OSSTATUS_DCHECK(result
== noErr
, result
);
575 // Get the Stream Format (client side).
576 property_size
= sizeof(asbd
);
577 result
= AudioUnitGetProperty(
579 kAudioUnitProperty_StreamFormat
,
580 kAudioUnitScope_Output
,
585 OSSTATUS_DCHECK(result
== noErr
, result
);
589 // Get the Stream Format (Output client side).
590 property_size
= sizeof(asbd_dev2_out
);
591 result
= AudioUnitGetProperty(
593 kAudioUnitProperty_StreamFormat
,
594 kAudioUnitScope_Output
,
599 OSSTATUS_DCHECK(result
== noErr
, result
);
603 // Set the format of all the AUs to the input/output devices channel count.
604 // For a simple case, you want to set this to
605 // the lower of count of the channels in the input device vs output device.
606 asbd
.mChannelsPerFrame
= std::min(asbd_dev1_in
.mChannelsPerFrame
,
607 asbd_dev2_out
.mChannelsPerFrame
);
609 // We must get the sample rate of the input device and set it to the
610 // stream format of AUHAL.
612 property_size
= sizeof(rate
);
614 AudioObjectPropertyAddress pa
;
615 pa
.mSelector
= kAudioDevicePropertyNominalSampleRate
;
616 pa
.mScope
= kAudioObjectPropertyScopeWildcard
;
617 pa
.mElement
= kAudioObjectPropertyElementMaster
;
618 result
= AudioObjectGetPropertyData(
626 OSSTATUS_DCHECK(result
== noErr
, result
);
630 input_sample_rate_
= rate
;
632 asbd
.mSampleRate
= rate
;
633 property_size
= sizeof(asbd
);
635 // Set the new formats to the AUs...
636 result
= AudioUnitSetProperty(
638 kAudioUnitProperty_StreamFormat
,
639 kAudioUnitScope_Output
,
644 OSSTATUS_DCHECK(result
== noErr
, result
);
648 result
= AudioUnitSetProperty(
650 kAudioUnitProperty_StreamFormat
,
651 kAudioUnitScope_Input
,
656 OSSTATUS_DCHECK(result
== noErr
, result
);
660 // Set the correct sample rate for the output device,
661 // but keep the channel count the same.
662 property_size
= sizeof(rate
);
664 pa
.mSelector
= kAudioDevicePropertyNominalSampleRate
;
665 pa
.mScope
= kAudioObjectPropertyScopeWildcard
;
666 pa
.mElement
= kAudioObjectPropertyElementMaster
;
667 result
= AudioObjectGetPropertyData(
675 OSSTATUS_DCHECK(result
== noErr
, result
);
679 output_sample_rate_
= rate
;
681 // The requested sample-rate must match the hardware sample-rate.
682 if (output_sample_rate_
!= params_
.sample_rate()) {
683 LOG(ERROR
) << "Requested sample-rate: " << params_
.sample_rate()
684 << " must match the hardware sample-rate: " << output_sample_rate_
;
685 return kAudioDeviceUnsupportedFormatError
;
688 asbd
.mSampleRate
= rate
;
689 property_size
= sizeof(asbd
);
691 // Set the new audio stream formats for the rest of the AUs...
692 result
= AudioUnitSetProperty(
694 kAudioUnitProperty_StreamFormat
,
695 kAudioUnitScope_Output
,
700 OSSTATUS_DCHECK(result
== noErr
, result
);
704 result
= AudioUnitSetProperty(
706 kAudioUnitProperty_StreamFormat
,
707 kAudioUnitScope_Input
,
712 OSSTATUS_DCHECK(result
== noErr
, result
);
716 void AudioSynchronizedStream::AllocateInputData() {
717 // Get the native number of input channels that the hardware supports.
718 int hardware_channels
= 0;
719 bool got_hardware_channels
= AudioManagerMac::GetDeviceChannels(
720 input_id_
, kAudioDevicePropertyScopeInput
, &hardware_channels
);
721 if (!got_hardware_channels
|| hardware_channels
> 2) {
722 // Only mono and stereo are supported on the input side. When it fails to
723 // get the native channel number or the native channel number is bigger
724 // than 2, we open the device in stereo mode.
725 hardware_channels
= 2;
728 // Allocate storage for the AudioBufferList used for the
729 // input data from the input AudioUnit.
730 // We allocate enough space for with one AudioBuffer per channel.
731 size_t malloc_size
= offsetof(AudioBufferList
, mBuffers
[0]) +
732 (sizeof(AudioBuffer
) * hardware_channels
);
734 input_buffer_list_
= static_cast<AudioBufferList
*>(malloc(malloc_size
));
735 input_buffer_list_
->mNumberBuffers
= hardware_channels
;
737 input_bus_
= AudioBus::Create(hardware_channels
, hardware_buffer_size_
);
738 wrapper_bus_
= AudioBus::CreateWrapper(channels_
);
739 if (hardware_channels
!= params_
.input_channels()) {
740 ChannelLayout hardware_channel_layout
=
741 GuessChannelLayout(hardware_channels
);
742 ChannelLayout requested_channel_layout
=
743 GuessChannelLayout(params_
.input_channels());
744 channel_mixer_
.reset(new ChannelMixer(hardware_channel_layout
,
745 requested_channel_layout
));
746 mixer_bus_
= AudioBus::Create(params_
.input_channels(),
747 hardware_buffer_size_
);
750 // Allocate buffers for AudioBufferList.
751 UInt32 buffer_size_bytes
= input_bus_
->frames() * sizeof(Float32
);
752 for (size_t i
= 0; i
< input_buffer_list_
->mNumberBuffers
; ++i
) {
753 input_buffer_list_
->mBuffers
[i
].mNumberChannels
= 1;
754 input_buffer_list_
->mBuffers
[i
].mDataByteSize
= buffer_size_bytes
;
755 input_buffer_list_
->mBuffers
[i
].mData
= input_bus_
->channel(i
);
759 OSStatus
AudioSynchronizedStream::HandleInputCallback(
760 AudioUnitRenderActionFlags
* io_action_flags
,
761 const AudioTimeStamp
* time_stamp
,
763 UInt32 number_of_frames
,
764 AudioBufferList
* io_data
) {
765 TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
767 if (first_input_time_
< 0.0)
768 first_input_time_
= time_stamp
->mSampleTime
;
770 // Get the new audio input data.
771 OSStatus result
= AudioUnitRender(
779 // TODO(xians): Add back the DCHECK after synchronize IO supports all
780 // combination of input and output params. See http://issue/246521.
784 // Buffer input into FIFO.
785 int available_frames
= fifo_
.max_frames() - fifo_
.frames();
786 if (input_bus_
->frames() <= available_frames
) {
787 if (channel_mixer_
) {
788 channel_mixer_
->Transform(input_bus_
.get(), mixer_bus_
.get());
789 fifo_
.Push(mixer_bus_
.get());
791 fifo_
.Push(input_bus_
.get());
798 OSStatus
AudioSynchronizedStream::HandleVarispeedCallback(
799 AudioUnitRenderActionFlags
* io_action_flags
,
800 const AudioTimeStamp
* time_stamp
,
802 UInt32 number_of_frames
,
803 AudioBufferList
* io_data
) {
804 // Create a wrapper bus on the AudioBufferList.
805 WrapBufferList(io_data
, wrapper_bus_
.get(), number_of_frames
);
807 if (fifo_
.frames() < static_cast<int>(number_of_frames
)) {
808 // We don't DCHECK here, since this is a possible run-time condition
809 // if the machine is bogged down.
810 wrapper_bus_
->Zero();
814 // Read from the FIFO to feed the varispeed.
815 fifo_
.Consume(wrapper_bus_
.get(), 0, number_of_frames
);
820 OSStatus
AudioSynchronizedStream::HandleOutputCallback(
821 AudioUnitRenderActionFlags
* io_action_flags
,
822 const AudioTimeStamp
* time_stamp
,
824 UInt32 number_of_frames
,
825 AudioBufferList
* io_data
) {
826 // Input callback hasn't run yet or we've suddenly changed sample-rates
828 if (first_input_time_
< 0.0 ||
829 static_cast<int>(number_of_frames
) != params_
.frames_per_buffer()) {
830 ZeroBufferList(io_data
);
834 // Use the varispeed playback rate to offset small discrepancies
835 // in hardware clocks, and also any differences in sample-rate
836 // between input and output devices.
838 // Calculate a varispeed rate scalar factor to compensate for drift between
839 // input and output. We use the actual number of frames still in the FIFO
840 // compared with the ideal value of |target_fifo_frames_|.
841 int delta
= fifo_
.frames() - target_fifo_frames_
;
843 // Average |delta| because it can jitter back/forth quite frequently
844 // by +/- the hardware buffer-size *if* the input and output callbacks are
845 // happening at almost exactly the same time. Also, if the input and output
846 // sample-rates are different then |delta| will jitter quite a bit due to
847 // the rate conversion happening in the varispeed, plus the jittering of
848 // the callbacks. The average value is what's important here.
849 average_delta_
+= (delta
- average_delta_
) * 0.1;
851 // Compute a rate compensation which always attracts us back to the
852 // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
853 const double kCorrectionTimeSeconds
= 0.1;
854 double correction_time_frames
= kCorrectionTimeSeconds
* output_sample_rate_
;
855 fifo_rate_compensation_
=
856 (correction_time_frames
+ average_delta_
) / correction_time_frames
;
858 // Adjust for FIFO drift.
859 OSStatus result
= AudioUnitSetParameter(
861 kVarispeedParam_PlaybackRate
,
862 kAudioUnitScope_Global
,
864 fifo_rate_compensation_
,
867 OSSTATUS_DCHECK(result
== noErr
, result
);
871 // Render to the output using the varispeed.
872 result
= AudioUnitRender(
880 OSSTATUS_DCHECK(result
== noErr
, result
);
884 // Create a wrapper bus on the AudioBufferList.
885 WrapBufferList(io_data
, wrapper_bus_
.get(), number_of_frames
);
888 source_
->OnMoreIOData(wrapper_bus_
.get(),
890 AudioBuffersState(0, 0));
895 OSStatus
AudioSynchronizedStream::InputProc(
897 AudioUnitRenderActionFlags
* io_action_flags
,
898 const AudioTimeStamp
* time_stamp
,
900 UInt32 number_of_frames
,
901 AudioBufferList
* io_data
) {
902 AudioSynchronizedStream
* stream
=
903 static_cast<AudioSynchronizedStream
*>(user_data
);
906 return stream
->HandleInputCallback(
914 OSStatus
AudioSynchronizedStream::VarispeedProc(
916 AudioUnitRenderActionFlags
* io_action_flags
,
917 const AudioTimeStamp
* time_stamp
,
919 UInt32 number_of_frames
,
920 AudioBufferList
* io_data
) {
921 AudioSynchronizedStream
* stream
=
922 static_cast<AudioSynchronizedStream
*>(user_data
);
925 return stream
->HandleVarispeedCallback(
933 OSStatus
AudioSynchronizedStream::OutputProc(
935 AudioUnitRenderActionFlags
* io_action_flags
,
936 const AudioTimeStamp
* time_stamp
,
938 UInt32 number_of_frames
,
939 AudioBufferList
* io_data
) {
940 AudioSynchronizedStream
* stream
=
941 static_cast<AudioSynchronizedStream
*>(user_data
);
944 return stream
->HandleOutputCallback(
952 void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
953 AudioDeviceID id
, bool is_input
) {
955 is_input_
= is_input
;
956 if (id_
== kAudioDeviceUnknown
)
959 UInt32 property_size
= sizeof(buffer_size_frames_
);
961 AudioObjectPropertyAddress pa
;
962 pa
.mSelector
= kAudioDevicePropertyBufferFrameSize
;
963 pa
.mScope
= kAudioObjectPropertyScopeWildcard
;
964 pa
.mElement
= kAudioObjectPropertyElementMaster
;
965 OSStatus result
= AudioObjectGetPropertyData(
971 &buffer_size_frames_
);
973 OSSTATUS_DCHECK(result
== noErr
, result
);