// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/audio_low_latency_output_win.h"

#include <Functiondiscoverykeys_devpkey.h>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/strings/utf_string_conversions.h"
#include "base/trace_event/trace_event.h"
#include "base/win/scoped_propvariant.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"

using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;
using base::win::ScopedCoMem;

namespace media {

// static
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
  const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
    return AUDCLNT_SHAREMODE_EXCLUSIVE;
  return AUDCLNT_SHAREMODE_SHARED;
}

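// Queries the shared-mode mix format of |device_id| (or of the default
// render device when the id is empty) and returns its sample rate, or 0 on
// failure.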
// static
int WASAPIAudioOutputStream::HardwareSampleRate(const std::string& device_id) {
  WAVEFORMATPCMEX format;
  ScopedComPtr<IAudioClient> client;
  if (device_id.empty()) {
    client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
  } else {
    ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id));
    if (!device.get())
      return 0;
    client = CoreAudioUtil::CreateClient(device.get());
  }

  if (!client.get() ||
      FAILED(CoreAudioUtil::GetSharedModeMixFormat(client.get(), &format)))
    return 0;

  return static_cast<int>(format.Format.nSamplesPerSec);
}

WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
                                                 const std::string& device_id,
                                                 const AudioParameters& params,
                                                 ERole device_role)
    : creating_thread_id_(base::PlatformThread::CurrentId()),
      manager_(manager),
      format_(),
      opened_(false),
      volume_(1.0),
      packet_size_frames_(0),
      packet_size_bytes_(0),
      endpoint_buffer_size_frames_(0),
      device_id_(device_id),
      device_role_(device_role),
      share_mode_(GetShareMode()),
      num_written_frames_(0),
      source_(NULL),
      audio_bus_(AudioBus::Create(params)) {
  DCHECK(manager_);

  // The empty string is used to indicate a default device and the
  // |device_role_| member controls whether that's the default or default
  // communications device.
  DCHECK_NE(device_id_, AudioManagerBase::kDefaultDeviceId);
  DCHECK_NE(device_id_, AudioManagerBase::kCommunicationsDeviceId);

  DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
  DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
      << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";

  // Load the Avrt DLL if not already loaded. Required to support MMCSS.
  bool avrt_init = avrt::Initialize();
  DCHECK(avrt_init) << "Failed to load the avrt.dll";

  // Set up the desired render format specified by the client. We use the
  // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
  // and high precision data can be supported.

  // Begin with the WAVEFORMATEX structure that specifies the basic format.
  WAVEFORMATEX* format = &format_.Format;
  format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
  format->nChannels = params.channels();
  format->nSamplesPerSec = params.sample_rate();
  format->wBitsPerSample = params.bits_per_sample();
  format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
  format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
  format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
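  // Example with illustrative values: 48000 Hz stereo 16-bit PCM yields
  // nBlockAlign == 2 * 2 == 4 bytes per frame and nAvgBytesPerSec ==
  // 48000 * 4 == 192000 bytes per second.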

  // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
  format_.Samples.wValidBitsPerSample = params.bits_per_sample();
  format_.dwChannelMask = CoreAudioUtil::GetChannelConfig(device_id, eRender);
  format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

  // Store size (in different units) of audio packets which we expect to
  // get from the audio endpoint device in each render event.
  packet_size_frames_ = params.frames_per_buffer();
  packet_size_bytes_ = params.GetBytesPerBuffer();
  DVLOG(1) << "Number of bytes per audio frame  : " << format->nBlockAlign;
  DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
  DVLOG(1) << "Number of bytes per packet       : " << packet_size_bytes_;
  DVLOG(1) << "Number of milliseconds per packet: "
           << params.GetBufferDuration().InMillisecondsF();

  // All events are auto-reset events and non-signaled initially.

  // Create the event which the audio engine will signal each time
  // a buffer becomes ready to be processed by the client.
  audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(audio_samples_render_event_.IsValid());

  // Create the event which will be set in Stop() when rendering shall stop.
  stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(stop_render_event_.IsValid());
}

WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
}

bool WASAPIAudioOutputStream::Open() {
  DVLOG(1) << "WASAPIAudioOutputStream::Open()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  if (opened_)
    return true;

  DCHECK(!audio_client_.get());
  DCHECK(!audio_render_client_.get());

  // Will be set to true if we ended up opening the default communications
  // device.
  bool communications_device = false;

  // Create an IAudioClient interface for the default rendering IMMDevice.
  ScopedComPtr<IAudioClient> audio_client;
  if (device_id_.empty()) {
    audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
    communications_device = (device_role_ == eCommunications);
  } else {
    ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
    DLOG_IF(ERROR, !device.get()) << "Failed to open device: " << device_id_;
    if (device.get())
      audio_client = CoreAudioUtil::CreateClient(device.get());
  }

  if (!audio_client.get())
    return false;

  // Extra sanity to ensure that the provided device format is still valid.
  if (!CoreAudioUtil::IsFormatSupported(audio_client.get(), share_mode_,
                                        &format_)) {
    LOG(ERROR) << "Audio parameters are not supported.";
    return false;
  }

  HRESULT hr = S_FALSE;
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    // Initialize the audio stream between the client and the device in shared
    // mode and using event-driven buffer handling.
    hr = CoreAudioUtil::SharedModeInitialize(
        audio_client.get(), &format_, audio_samples_render_event_.Get(),
        &endpoint_buffer_size_frames_,
        communications_device ? &kCommunicationsSessionId : NULL);
    if (FAILED(hr))
      return false;

    REFERENCE_TIME device_period = 0;
    if (FAILED(CoreAudioUtil::GetDevicePeriod(
            audio_client.get(), AUDCLNT_SHAREMODE_SHARED, &device_period))) {
      return false;
    }

    const int preferred_frames_per_buffer = static_cast<int>(
        format_.Format.nSamplesPerSec *
            CoreAudioUtil::RefererenceTimeToTimeDelta(device_period)
                .InSecondsF() +
        0.5);
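    // E.g. a 48000 Hz mix format combined with the typical 10 ms shared-mode
    // device period gives 48000 * 0.010 + 0.5 -> 480 frames.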

    // Packet size should always be an even divisor of the device period for
    // best performance; things will still work otherwise, but may glitch for a
    // couple of reasons.
    //
    // The first reason is if/when repeated RenderAudioFromSource() hit the
    // shared memory boundary between the renderer and the browser. The next
    // audio buffer is always requested after the current request is consumed.
    // With back-to-back calls the round-trip may not be fast enough and thus
    // audio will glitch as we fail to deliver audio in a timely manner.
    //
    // The second reason is event wakeup efficiency. We may have too few or too
    // many frames to fill the output buffer requested by WASAPI. If too few,
    // we'll refuse the render event and wait until more output space is
    // available. If we have too many frames, we'll only partially fill and
    // wait for the next render event. In either case certain remainders may
    // leave us unable to fulfill the request in a timely manner, thus glitches.
    //
    // Log a warning in these cases so we can help users in the field.
    // Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441.
    if (preferred_frames_per_buffer % packet_size_frames_) {
      LOG(WARNING)
          << "Using WASAPI output with a non-optimal buffer size, glitches from"
          << " back to back shared memory reads and partial fills of WASAPI"
          << " output buffers may occur. Buffer size of "
          << packet_size_frames_ << " is not an even divisor of "
          << preferred_frames_per_buffer;
    }
  } else {
    // TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize()
    // when removing the enable-exclusive-audio flag.
    hr = ExclusiveModeInitialization(audio_client.get(),
                                     audio_samples_render_event_.Get(),
                                     &endpoint_buffer_size_frames_);
    if (FAILED(hr))
      return false;

    // The buffer scheme for exclusive mode streams is not designed for max
    // flexibility. We only allow a "perfect match" between the packet size set
    // by the user and the actual endpoint buffer size.
    if (endpoint_buffer_size_frames_ != packet_size_frames_) {
      LOG(ERROR) << "Bailing out due to non-perfect timing.";
      return false;
    }
  }

  // Create an IAudioRenderClient client for an initialized IAudioClient.
  // The IAudioRenderClient interface enables us to write output data to
  // a rendering endpoint buffer.
  ScopedComPtr<IAudioRenderClient> audio_render_client =
      CoreAudioUtil::CreateRenderClient(audio_client.get());
  if (!audio_render_client.get())
    return false;

  // Store valid COM interfaces.
  audio_client_ = audio_client;
  audio_render_client_ = audio_render_client;

  hr = audio_client_->GetService(__uuidof(IAudioClock),
                                 audio_clock_.ReceiveVoid());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get IAudioClock service.";
    return false;
  }

  opened_ = true;
  return true;
}

void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
  DVLOG(1) << "WASAPIAudioOutputStream::Start()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  CHECK(callback);
  CHECK(opened_);

  if (render_thread_) {
    CHECK_EQ(callback, source_);
    return;
  }

  source_ = callback;

  // Ensure that the endpoint buffer is prepared with silence.
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
            audio_client_.get(), audio_render_client_.get())) {
      LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
      callback->OnError(this);
      return;
    }
  }

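  // Start the written-frame counter at the full endpoint buffer size; in
  // shared mode these frames are the silence written above. The counter
  // feeds the delay estimate computed in RenderAudioFromSource().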
  num_written_frames_ = endpoint_buffer_size_frames_;

  // Create and start the thread that will drive the rendering by waiting for
  // render events.
  render_thread_.reset(new base::DelegateSimpleThread(
      this, "wasapi_render_thread",
      base::SimpleThread::Options(base::ThreadPriority::REALTIME_AUDIO)));
  render_thread_->Start();
  if (!render_thread_->HasBeenStarted()) {
    LOG(ERROR) << "Failed to start WASAPI render thread.";
    StopThread();
    callback->OnError(this);
    return;
  }

  // Start streaming data between the endpoint buffer and the audio engine.
  HRESULT hr = audio_client_->Start();
  if (FAILED(hr)) {
    PLOG(ERROR) << "Failed to start output streaming: " << std::hex << hr;
    StopThread();
    callback->OnError(this);
  }
}

void WASAPIAudioOutputStream::Stop() {
  DVLOG(1) << "WASAPIAudioOutputStream::Stop()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  if (!render_thread_)
    return;

  // Stop output audio streaming.
  HRESULT hr = audio_client_->Stop();
  if (FAILED(hr)) {
    PLOG(ERROR) << "Failed to stop output streaming: " << std::hex << hr;
    source_->OnError(this);
  }

  // Make a local copy of |source_| since StopThread() will clear it.
  AudioSourceCallback* callback = source_;
  StopThread();

  // Flush all pending data and reset the audio clock stream position to 0.
  hr = audio_client_->Reset();
  if (FAILED(hr)) {
    PLOG(ERROR) << "Failed to reset streaming: " << std::hex << hr;
    callback->OnError(this);
  }

  // Extra safety check to ensure that the buffers are cleared.
  // If the buffers are not cleared correctly, the next call to Start()
  // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
  // This check is only needed for shared-mode streams.
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    UINT32 num_queued_frames = 0;
    audio_client_->GetCurrentPadding(&num_queued_frames);
    DCHECK_EQ(0u, num_queued_frames);
  }
}

void WASAPIAudioOutputStream::Close() {
  DVLOG(1) << "WASAPIAudioOutputStream::Close()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);

  // It is valid to call Close() before calling Open() or Start().
  // It is also valid to call Close() after Start() has been called.
  Stop();

  // Inform the audio manager that we have been closed. This will cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}

void WASAPIAudioOutputStream::SetVolume(double volume) {
  DVLOG(1) << "SetVolume(volume=" << volume << ")";
  float volume_float = static_cast<float>(volume);
  if (volume_float < 0.0f || volume_float > 1.0f) {
    return;
  }
  volume_ = volume_float;
}

void WASAPIAudioOutputStream::GetVolume(double* volume) {
  DVLOG(1) << "GetVolume()";
  *volume = static_cast<double>(volume_);
}

void WASAPIAudioOutputStream::Run() {
  ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);

  // Enable MMCSS to ensure that this thread receives prioritized access to
  // CPU resources.
  DWORD task_index = 0;
  HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
                                                      &task_index);
  bool mmcss_is_ok =
      (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
  if (!mmcss_is_ok) {
    // Failed to enable MMCSS on this thread. It is not fatal but can lead
    // to reduced QoS at high load.
    DWORD err = GetLastError();
    LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
  }

  HRESULT hr = S_FALSE;

  bool playing = true;
  bool error = false;
  HANDLE wait_array[] = { stop_render_event_.Get(),
                          audio_samples_render_event_.Get() };
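  // Note: |stop_render_event_| is first in |wait_array|. Since
  // WaitForMultipleObjects() reports the lowest-index signaled handle, a
  // pending stop takes priority over a simultaneous render event.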
  UINT64 device_frequency = 0;

  // The device frequency is the frequency generated by the hardware clock in
  // the audio device. The GetFrequency() method reports a constant frequency.
  hr = audio_clock_->GetFrequency(&device_frequency);
  error = FAILED(hr);
  PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
                        << std::hex << hr;

  // Keep rendering audio until the stop event or the stream-switch event
  // is signaled. An error event can also break the main thread loop.
  while (playing && !error) {
    // Wait for a close-down event, stream-switch event or a new render event.
    DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
                                               wait_array,
                                               FALSE,
                                               INFINITE);

    switch (wait_result) {
      case WAIT_OBJECT_0 + 0:
        // |stop_render_event_| has been set.
        playing = false;
        break;
      case WAIT_OBJECT_0 + 1:
        // |audio_samples_render_event_| has been set.
        error = !RenderAudioFromSource(device_frequency);
        break;
      default:
        error = true;
        break;
    }
  }

  if (playing && error) {
    // Stop audio rendering since something has gone wrong in our main thread
    // loop. Note that we are still in a "started" state, hence a Stop() call
    // is required to join the thread properly.
    audio_client_->Stop();
    PLOG(ERROR) << "WASAPI rendering failed.";
  }

  // Disable MMCSS.
  if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
    PLOG(WARNING) << "Failed to disable MMCSS";
  }
}

bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
  TRACE_EVENT0("audio", "RenderAudioFromSource");

  HRESULT hr = S_FALSE;
  UINT32 num_queued_frames = 0;
  uint8* audio_data = NULL;

  // Contains how much new data we can write to the buffer without
  // the risk of overwriting previously written data that the audio
  // engine has not yet read from the buffer.
  size_t num_available_frames = 0;

  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    // Get the padding value which represents the amount of rendering
    // data that is queued up to play in the endpoint buffer.
    hr = audio_client_->GetCurrentPadding(&num_queued_frames);
    num_available_frames =
        endpoint_buffer_size_frames_ - num_queued_frames;
    if (FAILED(hr)) {
      DLOG(ERROR) << "Failed to retrieve amount of available space: "
                  << std::hex << hr;
      return false;
    }
  } else {
    // While the stream is running, the system alternately sends one
    // buffer or the other to the client. This form of double buffering
    // is referred to as "ping-ponging". Each time the client receives
    // a buffer from the system (triggers this event) the client must
    // process the entire buffer. Calls to the GetCurrentPadding method
    // are unnecessary because the packet size must always equal the
    // buffer size. In contrast to the shared mode buffering scheme,
    // the latency for an event-driven, exclusive-mode stream depends
    // directly on the buffer size.
    num_available_frames = endpoint_buffer_size_frames_;
  }

  // Check if there is enough available space to fit the packet size
  // specified by the client. If not, wait until a future callback.
  if (num_available_frames < packet_size_frames_)
    return true;

  // Derive the number of packets we need to get from the client to fill up the
  // available area in the endpoint buffer. Well-behaved (> Vista) clients and
  // exclusive mode streams should generally have a |num_packets| value of 1.
  //
  // Vista clients are not able to maintain reliable callbacks, so the endpoint
  // buffer may exhaust itself such that back-to-back callbacks are occasionally
  // necessary to avoid glitches. In such cases we have no choice but to issue
  // back-to-back reads and pray that the browser side has enough data cached or
  // that the renderer can fulfill the read before we glitch anyway.
  //
  // API documentation does not guarantee that even on Win7+ clients we won't
  // need to fill more than a period size worth of buffers; but in practice this
  // appears to be infrequent.
  //
  // See http://crbug.com/524947.
  const size_t num_packets = num_available_frames / packet_size_frames_;
  for (size_t n = 0; n < num_packets; ++n) {
    // Grab all available space in the rendering endpoint buffer
    // into which the client can write a data packet.
    hr = audio_render_client_->GetBuffer(packet_size_frames_,
                                         &audio_data);
    if (FAILED(hr)) {
      DLOG(ERROR) << "Failed to use rendering audio buffer: "
                  << std::hex << hr;
      return false;
    }

    // Derive the audio delay which corresponds to the delay between
    // a render event and the time when the first audio sample in a
    // packet is played out through the speaker. This delay value
    // can typically be utilized by an acoustic echo-control (AEC)
    // unit at the render side.
    UINT64 position = 0;
    uint32 audio_delay_bytes = 0;
    hr = audio_clock_->GetPosition(&position, NULL);
    if (SUCCEEDED(hr)) {
      // Stream position of the sample that is currently playing
      // through the speaker.
      double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
          (static_cast<double>(position) / device_frequency);

      // Stream position of the last sample written to the endpoint
      // buffer. Note that the packet we are about to receive in
      // the upcoming callback is also included.
      size_t pos_last_sample_written_frames =
          num_written_frames_ + packet_size_frames_;

      // Derive the actual delay value which will be fed to the
      // render client using the OnMoreData() callback.
      audio_delay_bytes = (pos_last_sample_written_frames -
          pos_sample_playing_frames) * format_.Format.nBlockAlign;
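      // Illustrative numbers only: with 480-frame packets at 48 kHz, a
      // playing position of 4320 frames and 4800 written frames (including
      // this packet) give a delay of 480 frames, i.e. 10 ms, which equals
      // 1920 bytes when nBlockAlign == 4.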
    }

    // Read a data packet from the registered client source and
    // deliver a delay estimate in the same callback to the client.
    int frames_filled = source_->OnMoreData(
        audio_bus_.get(), audio_delay_bytes);
    uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
    DCHECK_LE(num_filled_bytes, packet_size_bytes_);

    // Note: If this ever changes to output raw float the data must be
    // clipped and sanitized since it may come from an untrusted
    // source such as NaCl.
    const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
    audio_bus_->Scale(volume_);
    audio_bus_->ToInterleaved(
        frames_filled, bytes_per_sample, audio_data);

    // Release the buffer space acquired in the GetBuffer() call.
    // Render silence if we were not able to fill up the buffer totally.
    DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
        AUDCLNT_BUFFERFLAGS_SILENT : 0;
    audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);

    num_written_frames_ += packet_size_frames_;
  }

  return true;
}

HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
    IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) {
  DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);

  float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
  REFERENCE_TIME requested_buffer_duration =
      static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);
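  // REFERENCE_TIME is expressed in 100 ns units, so e.g. 480 frames at
  // 48000 Hz gives f == 10.0 ms and requested_buffer_duration == 100000.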

  DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
  bool use_event = (event_handle != NULL &&
                    event_handle != INVALID_HANDLE_VALUE);
  if (use_event)
    stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
  DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;

  // Initialize the audio stream between the client and the device.
  // For an exclusive-mode stream that uses event-driven buffering, the
  // caller must specify nonzero values for hnsPeriodicity and
  // hnsBufferDuration, and the values of these two parameters must be equal.
  // The Initialize method allocates two buffers for the stream. Each buffer
  // is equal in duration to the value of the hnsBufferDuration parameter.
  // Following the Initialize call for a rendering stream, the caller should
  // fill the first of the two buffers before starting the stream.
  HRESULT hr = S_FALSE;
  hr = client->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
                          stream_flags,
                          requested_buffer_duration,
                          requested_buffer_duration,
                          reinterpret_cast<WAVEFORMATEX*>(&format_),
                          NULL);
  if (FAILED(hr)) {
    if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
      LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";

      UINT32 aligned_buffer_size = 0;
      client->GetBufferSize(&aligned_buffer_size);
      DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;

      // Calculate new aligned periodicity. Each unit of reference time
      // is 100 nanoseconds.
      REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
          (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
          + 0.5);

      // It is possible to re-activate and re-initialize the audio client
      // at this stage but we bail out with an error code instead and
      // combine it with a log message which informs about the suggested
      // aligned buffer size which should be used instead.
      DVLOG(1) << "aligned_buffer_duration: "
               << static_cast<double>(aligned_buffer_duration / 10000.0)
               << " [ms]";
    } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
      // We will get this error if we try to use a smaller buffer size than
      // the minimum supported size (usually ~3ms on Windows 7).
      LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD";
    }
    return hr;
  }

  if (use_event) {
    hr = client->SetEventHandle(event_handle);
    if (FAILED(hr)) {
      DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
      return hr;
    }
  }

  UINT32 buffer_size_in_frames = 0;
  hr = client->GetBufferSize(&buffer_size_in_frames);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
    return hr;
  }

  *endpoint_buffer_size = buffer_size_in_frames;
  DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
  return hr;
}

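// Signals |stop_render_event_| so that Run() leaves its wait loop, joins the
// render thread if it was started, and clears |source_|.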
void WASAPIAudioOutputStream::StopThread() {
  if (render_thread_) {
    if (render_thread_->HasBeenStarted()) {
      // Wait until the thread completes and perform cleanup.
      SetEvent(stop_render_event_.Get());
      render_thread_->Join();
    }

    render_thread_.reset();

    // Ensure that we don't quit the main thread loop immediately next
    // time Start() is called.
    ResetEvent(stop_render_event_.Get());
  }

  source_ = NULL;
}

}  // namespace media