// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/audio_low_latency_output_win.h"

#include <Functiondiscoverykeys_devpkey.h>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/strings/utf_string_conversions.h"
#include "base/trace_event/trace_event.h"
#include "base/win/scoped_propvariant.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"

using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;
using base::win::ScopedCoMem;

namespace media {
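
// Returns the share mode to use for all output streams in this process.
// Exclusive mode bypasses the system mixer for lower latency but must be
// explicitly enabled via the --enable-exclusive-audio command-line flag;
// otherwise the default shared mode is used.
// static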
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
  const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
    return AUDCLNT_SHAREMODE_EXCLUSIVE;
  return AUDCLNT_SHAREMODE_SHARED;
}
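
// Returns the sample rate of the shared-mode mix format for the given output
// device (the default console render device when |device_id| is empty), or 0
// if the device or its format cannot be queried.
// static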
int WASAPIAudioOutputStream::HardwareSampleRate(const std::string& device_id) {
  WAVEFORMATPCMEX format;
  ScopedComPtr<IAudioClient> client;
  if (device_id.empty()) {
    client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
  } else {
    ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id));
    if (!device.get())
      return 0;
    client = CoreAudioUtil::CreateClient(device.get());
  }

  if (!client.get() ||
      FAILED(CoreAudioUtil::GetSharedModeMixFormat(client.get(), &format))) {
    return 0;
  }

  return static_cast<int>(format.Format.nSamplesPerSec);
}

WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
                                                 const std::string& device_id,
                                                 const AudioParameters& params,
                                                 ERole device_role)
    : creating_thread_id_(base::PlatformThread::CurrentId()),
      manager_(manager),
      opened_(false),
      volume_(1.0),
      packet_size_frames_(0),
      packet_size_bytes_(0),
      endpoint_buffer_size_frames_(0),
      device_id_(device_id),
      device_role_(device_role),
      share_mode_(GetShareMode()),
      num_written_frames_(0),
      source_(NULL),
      audio_bus_(AudioBus::Create(params)) {
  DCHECK(manager_);
  DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
  DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
      << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";

  // Load the Avrt DLL if not already loaded. Required to support MMCSS.
  bool avrt_init = avrt::Initialize();
  DCHECK(avrt_init) << "Failed to load the avrt.dll";

  // Set up the desired render format specified by the client. We use the
  // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
  // and high precision data can be supported.

  // Begin with the WAVEFORMATEX structure that specifies the basic format.
  WAVEFORMATEX* format = &format_.Format;
  format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
  format->nChannels = params.channels();
  format->nSamplesPerSec = params.sample_rate();
  format->wBitsPerSample = params.bits_per_sample();
  format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
  format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
  format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);

  // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
  format_.Samples.wValidBitsPerSample = params.bits_per_sample();
  format_.dwChannelMask = CoreAudioUtil::GetChannelConfig(device_id, eRender);
  format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

  // Store size (in different units) of audio packets which we expect to
  // get from the audio endpoint device in each render event.
  packet_size_frames_ = params.frames_per_buffer();
  packet_size_bytes_ = params.GetBytesPerBuffer();
  DVLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign;
  DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
  DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_;
  DVLOG(1) << "Number of milliseconds per packet: "
           << params.GetBufferDuration().InMillisecondsF();

  // All events are auto-reset events and non-signaled initially.

  // Create the event which the audio engine will signal each time
  // a buffer becomes ready to be processed by the client.
  audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(audio_samples_render_event_.IsValid());

  // Create the event which will be set in Stop() when capturing shall stop.
  stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(stop_render_event_.IsValid());
}

WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
}

bool WASAPIAudioOutputStream::Open() {
  DVLOG(1) << "WASAPIAudioOutputStream::Open()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  if (opened_)
    return true;

  DCHECK(!audio_client_.get());
  DCHECK(!audio_render_client_.get());

  // Will be set to true if we ended up opening the default communications
  // device.
  bool communications_device = false;

  // Create an IAudioClient interface for the default rendering IMMDevice.
  ScopedComPtr<IAudioClient> audio_client;
  if (device_id_.empty() ||
      CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) {
    audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
    communications_device = (device_role_ == eCommunications);
  } else {
    ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
    DLOG_IF(ERROR, !device.get()) << "Failed to open device: " << device_id_;
    if (device.get())
      audio_client = CoreAudioUtil::CreateClient(device.get());
  }

  if (!audio_client.get())
    return false;

  // Extra sanity to ensure that the provided device format is still valid.
  if (!CoreAudioUtil::IsFormatSupported(audio_client.get(), share_mode_,
                                        &format_)) {
    LOG(ERROR) << "Audio parameters are not supported.";
    return false;
  }

  HRESULT hr = S_FALSE;
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    // Initialize the audio stream between the client and the device in shared
    // mode and using event-driven buffer handling.
    hr = CoreAudioUtil::SharedModeInitialize(
        audio_client.get(), &format_, audio_samples_render_event_.Get(),
        &endpoint_buffer_size_frames_,
        communications_device ? &kCommunicationsSessionId : NULL);
    if (FAILED(hr))
      return false;

    // We know from experience that the best possible callback sequence is
    // achieved when the packet size (given by the native device period)
    // is an even divisor of the endpoint buffer size.
    // Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441.
    if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) {
      LOG(ERROR)
          << "Bailing out due to non-perfect timing. Buffer size of "
          << packet_size_frames_ << " is not an even divisor of "
          << endpoint_buffer_size_frames_;
      return false;
    }
  } else {
    // TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize()
    // when removing the enable-exclusive-audio flag.
    hr = ExclusiveModeInitialization(audio_client.get(),
                                     audio_samples_render_event_.Get(),
                                     &endpoint_buffer_size_frames_);
    if (FAILED(hr))
      return false;

    // The buffer scheme for exclusive mode streams is not designed for max
    // flexibility. We only allow a "perfect match" between the packet size set
    // by the user and the actual endpoint buffer size.
    if (endpoint_buffer_size_frames_ != packet_size_frames_) {
      LOG(ERROR) << "Bailing out due to non-perfect timing.";
      return false;
    }
  }

  // Create an IAudioRenderClient client for an initialized IAudioClient.
  // The IAudioRenderClient interface enables us to write output data to
  // a rendering endpoint buffer.
  ScopedComPtr<IAudioRenderClient> audio_render_client =
      CoreAudioUtil::CreateRenderClient(audio_client.get());
  if (!audio_render_client.get())
    return false;

  // Store valid COM interfaces.
  audio_client_ = audio_client;
  audio_render_client_ = audio_render_client;
  hr = audio_client_->GetService(__uuidof(IAudioClock),
                                 audio_clock_.ReceiveVoid());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get IAudioClock service.";
    return false;
  }

  opened_ = true;
  return true;
}

void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
  DVLOG(1) << "WASAPIAudioOutputStream::Start()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  CHECK(callback);
  CHECK(opened_);

  if (render_thread_) {
    CHECK_EQ(callback, source_);
    return;
  }

  source_ = callback;

  // Ensure that the endpoint buffer is prepared with silence.
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
            audio_client_.get(), audio_render_client_.get())) {
      LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
      callback->OnError(this);
      return;
    }
  }
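
  // In shared mode the endpoint buffer has just been filled with silence, so
  // the audio engine already holds one full buffer of written frames; start
  // the counter there to keep the delay estimates derived in
  // RenderAudioFromSource() consistent.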
  num_written_frames_ = endpoint_buffer_size_frames_;

  // Create and start the thread that will drive the rendering by waiting for
  // render events.
  render_thread_.reset(
      new base::DelegateSimpleThread(this, "wasapi_render_thread"));
  render_thread_->Start();
  if (!render_thread_->HasBeenStarted()) {
    LOG(ERROR) << "Failed to start WASAPI render thread.";
    StopThread();
    callback->OnError(this);
    return;
  }

  // Start streaming data between the endpoint buffer and the audio engine.
  HRESULT hr = audio_client_->Start();
  if (FAILED(hr)) {
    PLOG(ERROR) << "Failed to start output streaming: " << std::hex << hr;
    StopThread();
    callback->OnError(this);
  }
}

void WASAPIAudioOutputStream::Stop() {
  DVLOG(1) << "WASAPIAudioOutputStream::Stop()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  if (!render_thread_)
    return;

  // Stop output audio streaming.
  HRESULT hr = audio_client_->Stop();
  if (FAILED(hr)) {
    PLOG(ERROR) << "Failed to stop output streaming: " << std::hex << hr;
    source_->OnError(this);
  }

  // Make a local copy of |source_| since StopThread() will clear it.
  AudioSourceCallback* callback = source_;
  StopThread();

  // Flush all pending data and reset the audio clock stream position to 0.
  hr = audio_client_->Reset();
  if (FAILED(hr)) {
    PLOG(ERROR) << "Failed to reset streaming: " << std::hex << hr;
    callback->OnError(this);
  }

  // Extra safety check to ensure that the buffers are cleared.
  // If the buffers are not cleared correctly, the next call to Start()
  // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
  // This check is only needed for shared-mode streams.
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    UINT32 num_queued_frames = 0;
    audio_client_->GetCurrentPadding(&num_queued_frames);
    DCHECK_EQ(0u, num_queued_frames);
  }
}

void WASAPIAudioOutputStream::Close() {
  DVLOG(1) << "WASAPIAudioOutputStream::Close()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);

  // It is valid to call Close() before calling Open() or Start().
  // It is also valid to call Close() after Start() has been called.
  Stop();

  // Inform the audio manager that we have been closed. This will cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}

void WASAPIAudioOutputStream::SetVolume(double volume) {
  DVLOG(1) << "SetVolume(volume=" << volume << ")";
  float volume_float = static_cast<float>(volume);
  if (volume_float < 0.0f || volume_float > 1.0f) {
    return;
  }
  volume_ = volume_float;
}

void WASAPIAudioOutputStream::GetVolume(double* volume) {
  DVLOG(1) << "GetVolume()";
  *volume = static_cast<double>(volume_);
}

void WASAPIAudioOutputStream::Run() {
  ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);

  // Increase the thread priority.
  render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO);

  // Enable MMCSS to ensure that this thread receives prioritized access to
  // CPU resources.
  DWORD task_index = 0;
  HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
                                                      &task_index);
  bool mmcss_is_ok =
      (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
  if (!mmcss_is_ok) {
    // Failed to enable MMCSS on this thread. It is not fatal but can lead
    // to reduced QoS at high load.
    DWORD err = GetLastError();
    LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
  }

  HRESULT hr = S_FALSE;

  bool playing = true;
  bool error = false;
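
  // The ordering of handles in |wait_array| defines the meaning of the
  // WAIT_OBJECT_0 + n results below: index 0 is the stop event and index 1
  // is the render event.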
  HANDLE wait_array[] = { stop_render_event_.Get(),
                          audio_samples_render_event_.Get() };
  UINT64 device_frequency = 0;

  // The device frequency is the frequency generated by the hardware clock in
  // the audio device. The GetFrequency() method reports a constant frequency.
  hr = audio_clock_->GetFrequency(&device_frequency);
  error = FAILED(hr);
  PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
                        << std::hex << hr;

  // Keep rendering audio until the stop event or the stream-switch event
  // is signaled. An error event can also break the main thread loop.
  while (playing && !error) {
    // Wait for a close-down event, stream-switch event or a new render event.
    DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
                                               wait_array,
                                               FALSE,
                                               INFINITE);

    switch (wait_result) {
      case WAIT_OBJECT_0 + 0:
        // |stop_render_event_| has been set.
        playing = false;
        break;
      case WAIT_OBJECT_0 + 1:
        // |audio_samples_render_event_| has been set.
        error = !RenderAudioFromSource(device_frequency);
        break;
      default:
        error = true;
        break;
    }
  }

  if (playing && error) {
    // Stop audio rendering since something has gone wrong in our main thread
    // loop. Note that we are still in a "started" state, hence a Stop() call
    // is required to join the thread properly.
    audio_client_->Stop();
    PLOG(ERROR) << "WASAPI rendering failed.";
  }

  // Disable MMCSS.
  if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
    PLOG(WARNING) << "Failed to disable MMCSS";
  }
}

bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
  TRACE_EVENT0("audio", "RenderAudioFromSource");

  HRESULT hr = S_FALSE;
  UINT32 num_queued_frames = 0;
  uint8* audio_data = NULL;

  // Contains how much new data we can write to the buffer without
  // the risk of overwriting previously written data that the audio
  // engine has not yet read from the buffer.
  size_t num_available_frames = 0;

  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    // Get the padding value which represents the amount of rendering
    // data that is queued up to play in the endpoint buffer.
    hr = audio_client_->GetCurrentPadding(&num_queued_frames);
    num_available_frames =
        endpoint_buffer_size_frames_ - num_queued_frames;
    if (FAILED(hr)) {
      DLOG(ERROR) << "Failed to retrieve amount of available space: "
                  << std::hex << hr;
      return false;
    }
  } else {
    // While the stream is running, the system alternately sends one
    // buffer or the other to the client. This form of double buffering
    // is referred to as "ping-ponging". Each time the client receives
    // a buffer from the system (triggers this event) the client must
    // process the entire buffer. Calls to the GetCurrentPadding method
    // are unnecessary because the packet size must always equal the
    // buffer size. In contrast to the shared mode buffering scheme,
    // the latency for an event-driven, exclusive-mode stream depends
    // directly on the buffer size.
    num_available_frames = endpoint_buffer_size_frames_;
  }

  // Check if there is enough available space to fit the packet size
  // specified by the client.
  if (num_available_frames < packet_size_frames_)
    return true;

  DLOG_IF(ERROR, num_available_frames % packet_size_frames_ != 0)
      << "Non-perfect timing detected (num_available_frames="
      << num_available_frames << ", packet_size_frames="
      << packet_size_frames_ << ")";

  // Derive the number of packets we need to get from the client to
  // fill up the available area in the endpoint buffer.
  // |num_packets| will always be one for exclusive-mode streams and
  // will be one in most cases for shared mode streams as well.
  // However, we have found that two packets can sometimes be
  // required.
  size_t num_packets = (num_available_frames / packet_size_frames_);

  for (size_t n = 0; n < num_packets; ++n) {
    // Grab all available space in the rendering endpoint buffer
    // into which the client can write a data packet.
    hr = audio_render_client_->GetBuffer(packet_size_frames_,
                                         &audio_data);
    if (FAILED(hr)) {
      DLOG(ERROR) << "Failed to use rendering audio buffer: "
                  << std::hex << hr;
      return false;
    }

    // Derive the audio delay which corresponds to the delay between
    // a render event and the time when the first audio sample in a
    // packet is played out through the speaker. This delay value
    // can typically be utilized by an acoustic echo-control (AEC)
    // unit at the render side.
    UINT64 position = 0;
    uint32 audio_delay_bytes = 0;
    hr = audio_clock_->GetPosition(&position, NULL);
    if (SUCCEEDED(hr)) {
      // Stream position of the sample that is currently playing
      // through the speaker.
      double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
          (static_cast<double>(position) / device_frequency);

      // Stream position of the last sample written to the endpoint
      // buffer. Note that the packet we are about to receive in
      // the upcoming callback is also included.
      size_t pos_last_sample_written_frames =
          num_written_frames_ + packet_size_frames_;

      // Derive the actual delay value which will be fed to the
      // render client using the OnMoreData() callback.
      audio_delay_bytes = (pos_last_sample_written_frames -
          pos_sample_playing_frames) * format_.Format.nBlockAlign;
    }

    // Read a data packet from the registered client source and
    // deliver a delay estimate in the same callback to the client.
    int frames_filled = source_->OnMoreData(
        audio_bus_.get(), audio_delay_bytes);
    uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
    DCHECK_LE(num_filled_bytes, packet_size_bytes_);
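
    // |audio_bus_| holds planar float samples; convert them to the
    // interleaved integer PCM layout WASAPI expects, applying the current
    // volume on the way.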
    // Note: If this ever changes to output raw float the data must be
    // clipped and sanitized since it may come from an untrusted
    // source such as NaCl.
    const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
    audio_bus_->Scale(volume_);
    audio_bus_->ToInterleaved(
        frames_filled, bytes_per_sample, audio_data);

    // Release the buffer space acquired in the GetBuffer() call.
    // Render silence if we were not able to fill up the buffer totally.
    DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
        AUDCLNT_BUFFERFLAGS_SILENT : 0;
    audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);

    num_written_frames_ += packet_size_frames_;
  }

  return true;
}

HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
    IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) {
  DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);
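
  // Express the requested packet size as a REFERENCE_TIME duration: frames
  // are first converted to milliseconds, then to 100-nanosecond units (the
  // unit IAudioClient::Initialize() expects), rounded to the nearest unit.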
  float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
  REFERENCE_TIME requested_buffer_duration =
      static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);

  DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
  bool use_event = (event_handle != NULL &&
                    event_handle != INVALID_HANDLE_VALUE);
  if (use_event)
    stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
  DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;

  // Initialize the audio stream between the client and the device.
  // For an exclusive-mode stream that uses event-driven buffering, the
  // caller must specify nonzero values for hnsPeriodicity and
  // hnsBufferDuration, and the values of these two parameters must be equal.
  // The Initialize method allocates two buffers for the stream. Each buffer
  // is equal in duration to the value of the hnsBufferDuration parameter.
  // Following the Initialize call for a rendering stream, the caller should
  // fill the first of the two buffers before starting the stream.
  HRESULT hr = S_FALSE;
  hr = client->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
                          stream_flags,
                          requested_buffer_duration,
                          requested_buffer_duration,
                          reinterpret_cast<WAVEFORMATEX*>(&format_),
                          NULL);
  if (FAILED(hr)) {
    if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
      LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";

      UINT32 aligned_buffer_size = 0;
      client->GetBufferSize(&aligned_buffer_size);
      DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;

      // Calculate new aligned periodicity. Each unit of reference time
      // is 100 nanoseconds.
      REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
          (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
          + 0.5);

      // It is possible to re-activate and re-initialize the audio client
      // at this stage but we bail out with an error code instead and
      // combine it with a log message which informs about the suggested
      // aligned buffer size which should be used instead.
      DVLOG(1) << "aligned_buffer_duration: "
               << static_cast<double>(aligned_buffer_duration / 10000.0)
               << " [ms]";
    } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
      // We will get this error if we try to use a smaller buffer size than
      // the minimum supported size (usually ~3ms on Windows 7).
      LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD";
    }
    return hr;
  }

  if (use_event) {
    hr = client->SetEventHandle(event_handle);
    if (FAILED(hr)) {
      DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
      return hr;
    }
  }

  UINT32 buffer_size_in_frames = 0;
  hr = client->GetBufferSize(&buffer_size_in_frames);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
    return hr;
  }

  *endpoint_buffer_size = buffer_size_in_frames;
  DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
  return hr;
}

void WASAPIAudioOutputStream::StopThread() {
  if (render_thread_) {
    if (render_thread_->HasBeenStarted()) {
      // Wait until the thread completes and perform cleanup.
      SetEvent(stop_render_event_.Get());
      render_thread_->Join();
    }

    render_thread_.reset();

    // Ensure that we don't quit the main thread loop immediately next
    // time Start() is called.
    ResetEvent(stop_render_event_.Get());
  }

  source_ = NULL;
}

}  // namespace media