// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/core_audio_util_win.h"

#include <audioclient.h>
#include <devicetopology.h>
#include <functiondiscoverykeys_devpkey.h>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_handle.h"
#include "base/win/scoped_propvariant.h"
#include "base/win/windows_version.h"
#include "media/base/media_switches.h"
using base::win::ScopedCoMem;
using base::win::ScopedHandle;

namespace media {
// See header file for documentation.
// {BE39AF4F-087C-423F-9303-234EC1E5B8EE}
const GUID kCommunicationsSessionId = {
  0xbe39af4f, 0x87c, 0x423f, { 0x93, 0x3, 0x23, 0x4e, 0xc1, 0xe5, 0xb8, 0xee }
};

enum { KSAUDIO_SPEAKER_UNSUPPORTED = 0 };
// Converts Microsoft's channel configuration to ChannelLayout.
// This mapping is not perfect but the best we can do given the current
// ChannelLayout enumerator and the Windows-specific speaker configurations
// defined in ksmedia.h. Don't assume that the channel ordering in
// ChannelLayout is exactly the same as the Windows-specific configuration.
// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
// speakers are different in these two definitions.
static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
  switch (config) {
    case KSAUDIO_SPEAKER_DIRECTOUT:
      DVLOG(2) << "KSAUDIO_SPEAKER_DIRECTOUT=>CHANNEL_LAYOUT_NONE";
      return CHANNEL_LAYOUT_NONE;
    case KSAUDIO_SPEAKER_MONO:
      DVLOG(2) << "KSAUDIO_SPEAKER_MONO=>CHANNEL_LAYOUT_MONO";
      return CHANNEL_LAYOUT_MONO;
    case KSAUDIO_SPEAKER_STEREO:
      DVLOG(2) << "KSAUDIO_SPEAKER_STEREO=>CHANNEL_LAYOUT_STEREO";
      return CHANNEL_LAYOUT_STEREO;
    case KSAUDIO_SPEAKER_QUAD:
      DVLOG(2) << "KSAUDIO_SPEAKER_QUAD=>CHANNEL_LAYOUT_QUAD";
      return CHANNEL_LAYOUT_QUAD;
    case KSAUDIO_SPEAKER_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_SURROUND=>CHANNEL_LAYOUT_4_0";
      return CHANNEL_LAYOUT_4_0;
    case KSAUDIO_SPEAKER_5POINT1:
      DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1=>CHANNEL_LAYOUT_5_1_BACK";
      return CHANNEL_LAYOUT_5_1_BACK;
    case KSAUDIO_SPEAKER_5POINT1_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1_SURROUND=>CHANNEL_LAYOUT_5_1";
      return CHANNEL_LAYOUT_5_1;
    case KSAUDIO_SPEAKER_7POINT1:
      DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1=>CHANNEL_LAYOUT_7_1_WIDE";
      return CHANNEL_LAYOUT_7_1_WIDE;
    case KSAUDIO_SPEAKER_7POINT1_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1_SURROUND=>CHANNEL_LAYOUT_7_1";
      return CHANNEL_LAYOUT_7_1;
    default:
      DVLOG(2) << "Unsupported channel configuration: " << config;
      return CHANNEL_LAYOUT_UNSUPPORTED;
  }
}
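// For reference (not exhaustive): the Windows-side values are plain speaker
// bit masks from ksmedia.h, e.g. KSAUDIO_SPEAKER_STEREO is
// SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT (0x1 | 0x2 == 0x3). A minimal
// sketch of using the helper above:
//
//   ChannelLayout layout =
//       ChannelConfigToChannelLayout(KSAUDIO_SPEAKER_STEREO);
//   DCHECK_EQ(CHANNEL_LAYOUT_STEREO, layout);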
// TODO(henrika): add mapping for all types in the ChannelLayout enumerator.
static ChannelConfig ChannelLayoutToChannelConfig(ChannelLayout layout) {
  switch (layout) {
    case CHANNEL_LAYOUT_NONE:
      DVLOG(2) << "CHANNEL_LAYOUT_NONE=>KSAUDIO_SPEAKER_UNSUPPORTED";
      return KSAUDIO_SPEAKER_UNSUPPORTED;
    case CHANNEL_LAYOUT_UNSUPPORTED:
      DVLOG(2) << "CHANNEL_LAYOUT_UNSUPPORTED=>KSAUDIO_SPEAKER_UNSUPPORTED";
      return KSAUDIO_SPEAKER_UNSUPPORTED;
    case CHANNEL_LAYOUT_MONO:
      DVLOG(2) << "CHANNEL_LAYOUT_MONO=>KSAUDIO_SPEAKER_MONO";
      return KSAUDIO_SPEAKER_MONO;
    case CHANNEL_LAYOUT_STEREO:
      DVLOG(2) << "CHANNEL_LAYOUT_STEREO=>KSAUDIO_SPEAKER_STEREO";
      return KSAUDIO_SPEAKER_STEREO;
    case CHANNEL_LAYOUT_QUAD:
      DVLOG(2) << "CHANNEL_LAYOUT_QUAD=>KSAUDIO_SPEAKER_QUAD";
      return KSAUDIO_SPEAKER_QUAD;
    case CHANNEL_LAYOUT_4_0:
      DVLOG(2) << "CHANNEL_LAYOUT_4_0=>KSAUDIO_SPEAKER_SURROUND";
      return KSAUDIO_SPEAKER_SURROUND;
    case CHANNEL_LAYOUT_5_1_BACK:
      DVLOG(2) << "CHANNEL_LAYOUT_5_1_BACK=>KSAUDIO_SPEAKER_5POINT1";
      return KSAUDIO_SPEAKER_5POINT1;
    case CHANNEL_LAYOUT_5_1:
      DVLOG(2) << "CHANNEL_LAYOUT_5_1=>KSAUDIO_SPEAKER_5POINT1_SURROUND";
      return KSAUDIO_SPEAKER_5POINT1_SURROUND;
    case CHANNEL_LAYOUT_7_1_WIDE:
      DVLOG(2) << "CHANNEL_LAYOUT_7_1_WIDE=>KSAUDIO_SPEAKER_7POINT1";
      return KSAUDIO_SPEAKER_7POINT1;
    case CHANNEL_LAYOUT_7_1:
      DVLOG(2) << "CHANNEL_LAYOUT_7_1=>KSAUDIO_SPEAKER_7POINT1_SURROUND";
      return KSAUDIO_SPEAKER_7POINT1_SURROUND;
    default:
      DVLOG(2) << "Unsupported channel layout: " << layout;
      return KSAUDIO_SPEAKER_UNSUPPORTED;
  }
}
static std::ostream& operator<<(std::ostream& os,
                                const WAVEFORMATPCMEX& format) {
  os << "wFormatTag: 0x" << std::hex << format.Format.wFormatTag
     << ", nChannels: " << std::dec << format.Format.nChannels
     << ", nSamplesPerSec: " << format.Format.nSamplesPerSec
     << ", nAvgBytesPerSec: " << format.Format.nAvgBytesPerSec
     << ", nBlockAlign: " << format.Format.nBlockAlign
     << ", wBitsPerSample: " << format.Format.wBitsPerSample
     << ", cbSize: " << format.Format.cbSize
     << ", wValidBitsPerSample: " << format.Samples.wValidBitsPerSample
     << ", dwChannelMask: 0x" << std::hex << format.dwChannelMask;
  return os;
}
static bool LoadAudiosesDll() {
  static const wchar_t* const kAudiosesDLL =
      L"%WINDIR%\\system32\\audioses.dll";

  wchar_t path[MAX_PATH] = {0};
  ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
  return (LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH) != NULL);
}
static bool CanCreateDeviceEnumerator() {
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
  HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                                NULL, CLSCTX_INPROC_SERVER);

  // If we hit CO_E_NOTINITIALIZED, CoInitialize has not been called and it
  // must be called at least once for each thread that uses the COM library.
  CHECK_NE(hr, CO_E_NOTINITIALIZED);

  return SUCCEEDED(hr);
}
static std::string GetDeviceID(IMMDevice* device) {
  ScopedCoMem<WCHAR> device_id_com;
  std::string device_id;
  if (SUCCEEDED(device->GetId(&device_id_com)))
    base::WideToUTF8(device_id_com, wcslen(device_id_com), &device_id);
  return device_id;
}
bool CoreAudioUtil::IsSupported() {
  // It is possible to force usage of WaveXxx APIs by using a command line flag.
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kForceWaveAudio)) {
    DVLOG(1) << "Forcing usage of Windows WaveXxx APIs";
    return false;
  }

  // Microsoft does not plan to make the Core Audio APIs available for use
  // with earlier versions of Windows, including Microsoft Windows Server 2003,
  // Windows XP, Windows Millennium Edition, Windows 2000, and Windows 98.
  if (base::win::GetVersion() < base::win::VERSION_VISTA)
    return false;

  // The audio core APIs are implemented in the Mmdevapi.dll and Audioses.dll
  // system components.
  // Dependency Walker shows that it is enough to verify possibility to load
  // the Audioses DLL since it depends on Mmdevapi.dll.
  // See http://crbug.com/166397 why this extra step is required to guarantee
  // Core Audio support.
  static bool g_audioses_dll_available = LoadAudiosesDll();
  if (!g_audioses_dll_available)
    return false;

  // Being able to load the Audioses.dll does not seem to be sufficient for
  // all devices to guarantee Core Audio support. To be 100%, we also verify
  // that it is possible to create the IMMDeviceEnumerator interface. If this
  // works as well we should be home free.
  static bool g_can_create_device_enumerator = CanCreateDeviceEnumerator();
  LOG_IF(ERROR, !g_can_create_device_enumerator)
      << "Failed to create Core Audio device enumerator on thread with ID "
      << GetCurrentThreadId();
  return g_can_create_device_enumerator;
}
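// Typical usage (sketch only; the real call sites live in the Windows audio
// backends and are not reproduced here):
//
//   if (media::CoreAudioUtil::IsSupported()) {
//     // Safe to use the WASAPI/Core Audio code paths.
//   } else {
//     // Fall back to the legacy WaveXxx implementation.
//   }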
base::TimeDelta CoreAudioUtil::RefererenceTimeToTimeDelta(REFERENCE_TIME time) {
  // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
  return base::TimeDelta::FromMicroseconds(0.1 * time + 0.5);
}
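// Worked example: a REFERENCE_TIME of 100000 units equals
// 100000 * 100 ns = 10 ms, so RefererenceTimeToTimeDelta(100000) returns a
// TimeDelta of 10000 microseconds (the + 0.5 rounds to the nearest
// microsecond).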
AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() {
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
    return AUDCLNT_SHAREMODE_EXCLUSIVE;
  return AUDCLNT_SHAREMODE_SHARED;
}
int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
  DCHECK(IsSupported());
  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator.get())
    return 0;

  // Generate a collection of active (present and not disabled) audio endpoint
  // devices for the specified data-flow direction.
  // This method will succeed even if all devices are disabled.
  ScopedComPtr<IMMDeviceCollection> collection;
  HRESULT hr = device_enumerator->EnumAudioEndpoints(data_flow,
                                                     DEVICE_STATE_ACTIVE,
                                                     collection.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "IMMDeviceCollection::EnumAudioEndpoints: " << std::hex << hr;
    return 0;
  }

  // Retrieve the number of active audio devices for the specified direction.
  UINT number_of_active_devices = 0;
  collection->GetCount(&number_of_active_devices);
  DVLOG(2) << ((data_flow == eCapture) ? "[in ] " : "[out] ")
           << "number of devices: " << number_of_active_devices;
  return static_cast<int>(number_of_active_devices);
}
ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
  HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                                NULL, CLSCTX_INPROC_SERVER);
  if (hr == CO_E_NOTINITIALIZED) {
    LOG(ERROR) << "CoCreateInstance fails with CO_E_NOTINITIALIZED";
    // We have seen crashes which indicate that this method can in fact
    // fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
    // modules. Calling CoInitializeEx is an attempt to resolve the reported
    // issues. See http://crbug.com/378465 for details.
    hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    if (SUCCEEDED(hr)) {
      hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                            NULL, CLSCTX_INPROC_SERVER);
    }
  }
  CHECK(SUCCEEDED(hr));
  return device_enumerator;
}
ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
                                                           ERole role) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> endpoint_device;

  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator.get())
    return endpoint_device;

  // Retrieve the default audio endpoint for the specified data-flow
  // direction and role.
  HRESULT hr = device_enumerator->GetDefaultAudioEndpoint(
      data_flow, role, endpoint_device.Receive());
  if (FAILED(hr)) {
    DVLOG(1) << "IMMDeviceEnumerator::GetDefaultAudioEndpoint: "
             << std::hex << hr;
    return endpoint_device;
  }

  // Verify that the audio endpoint device is active, i.e., that the audio
  // adapter that connects to the endpoint device is present and enabled.
  DWORD state = DEVICE_STATE_DISABLED;
  hr = endpoint_device->GetState(&state);
  if (SUCCEEDED(hr)) {
    if (!(state & DEVICE_STATE_ACTIVE)) {
      DVLOG(1) << "Selected endpoint device is not active";
      endpoint_device.Release();
    }
  }
  return endpoint_device;
}
std::string CoreAudioUtil::GetDefaultOutputDeviceID() {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device(CreateDefaultDevice(eRender, eConsole));
  return device.get() ? GetDeviceID(device.get()) : std::string();
}
ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
    const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> endpoint_device;

  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator.get())
    return endpoint_device;

  // Retrieve an audio device specified by an endpoint device-identification
  // string.
  HRESULT hr = device_enumerator->GetDevice(
      base::UTF8ToUTF16(device_id).c_str(), endpoint_device.Receive());
  DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: "
                          << std::hex << hr;
  return endpoint_device;
}
HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
  DCHECK(IsSupported());

  // Retrieve unique name of endpoint device.
  // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
  AudioDeviceName device_name;
  device_name.unique_id = GetDeviceID(device);
  if (device_name.unique_id.empty())
    return E_FAIL;

  // Retrieve user-friendly name of endpoint device.
  // Example: "Microphone (Realtek High Definition Audio)".
  ScopedComPtr<IPropertyStore> properties;
  HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
  if (FAILED(hr))
    return hr;
  base::win::ScopedPropVariant friendly_name;
  hr = properties->GetValue(PKEY_Device_FriendlyName, friendly_name.Receive());
  if (FAILED(hr))
    return hr;
  if (friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
    base::WideToUTF8(friendly_name.get().pwszVal,
                     wcslen(friendly_name.get().pwszVal),
                     &device_name.device_name);
  }

  *name = device_name;
  DVLOG(2) << "friendly name: " << device_name.device_name;
  DVLOG(2) << "unique id    : " << device_name.unique_id;
  return hr;
}
std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
    IMMDeviceEnumerator* enumerator) {
  DCHECK(IsSupported());

  // Fetching the controller device id could be as simple as fetching the value
  // of the "{B3F8FA53-0004-438E-9003-51A46E139BFC},2" property in the property
  // store of the |device|, but that key isn't defined in any header and
  // according to MS should not be relied upon.
  // So, instead, we go deeper, look at the device topology and fetch the
  // PKEY_Device_InstanceId of the associated physical audio device.
  ScopedComPtr<IDeviceTopology> topology;
  ScopedComPtr<IConnector> connector;
  ScopedCoMem<WCHAR> filter_id;
  if (FAILED(device->Activate(__uuidof(IDeviceTopology), CLSCTX_ALL, NULL,
                              topology.ReceiveVoid())) ||
      // For our purposes checking the first connected device should be enough
      // and if there are cases where there are more than one device connected
      // we're not sure how to handle that anyway. So we pass 0.
      FAILED(topology->GetConnector(0, connector.Receive())) ||
      FAILED(connector->GetDeviceIdConnectedTo(&filter_id))) {
    DLOG(ERROR) << "Failed to get the device identifier of the audio device";
    return std::string();
  }

  // Now look at the properties of the connected device node and fetch the
  // instance id (PKEY_Device_InstanceId) of the device node that uniquely
  // identifies the controller.
  ScopedComPtr<IMMDevice> device_node;
  ScopedComPtr<IPropertyStore> properties;
  base::win::ScopedPropVariant instance_id;
  if (FAILED(enumerator->GetDevice(filter_id, device_node.Receive())) ||
      FAILED(device_node->OpenPropertyStore(STGM_READ, properties.Receive())) ||
      FAILED(properties->GetValue(PKEY_Device_InstanceId,
                                  instance_id.Receive())) ||
      instance_id.get().vt != VT_LPWSTR) {
    DLOG(ERROR) << "Failed to get instance id of the audio device node";
    return std::string();
  }

  std::string controller_id;
  base::WideToUTF8(instance_id.get().pwszVal,
                   wcslen(instance_id.get().pwszVal),
                   &controller_id);

  return controller_id;
}
std::string CoreAudioUtil::GetMatchingOutputDeviceID(
    const std::string& input_device_id) {
  ScopedComPtr<IMMDevice> input_device(CreateDevice(input_device_id));
  if (!input_device.get())
    return std::string();

  // See if we can get id of the associated controller.
  ScopedComPtr<IMMDeviceEnumerator> enumerator(CreateDeviceEnumerator());
  std::string controller_id(
      GetAudioControllerID(input_device.get(), enumerator.get()));
  if (controller_id.empty())
    return std::string();

  // Now enumerate the available (and active) output devices and see if any of
  // them is associated with the same controller.
  ScopedComPtr<IMMDeviceCollection> collection;
  enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE,
                                 collection.Receive());
  if (!collection.get())
    return std::string();

  UINT count = 0;
  collection->GetCount(&count);
  ScopedComPtr<IMMDevice> output_device;
  for (UINT i = 0; i < count; ++i) {
    collection->Item(i, output_device.Receive());
    std::string output_controller_id(
        GetAudioControllerID(output_device.get(), enumerator.get()));
    if (output_controller_id == controller_id)
      break;
    output_device = NULL;
  }

  return output_device.get() ? GetDeviceID(output_device.get()) : std::string();
}
std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
  if (!audio_device.get())
    return std::string();

  AudioDeviceName device_name;
  HRESULT hr = GetDeviceName(audio_device.get(), &device_name);
  if (FAILED(hr))
    return std::string();

  return device_name.device_name;
}
bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
                                    ERole role,
                                    const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device = CreateDefaultDevice(flow, role);
  if (!device.get())
    return false;

  std::string str_default(GetDeviceID(device.get()));
  return device_id.compare(str_default) == 0;
}
EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMEndpoint> endpoint;
  HRESULT hr = device->QueryInterface(endpoint.Receive());
  if (FAILED(hr)) {
    DVLOG(1) << "IMMDevice::QueryInterface: " << std::hex << hr;
    return eAll;
  }

  EDataFlow data_flow;
  hr = endpoint->GetDataFlow(&data_flow);
  if (FAILED(hr)) {
    DVLOG(1) << "IMMEndpoint::GetDataFlow: " << std::hex << hr;
    return eAll;
  }
  return data_flow;
}
ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
    IMMDevice* audio_device) {
  DCHECK(IsSupported());

  // Creates and activates an IAudioClient COM object given the selected
  // endpoint device.
  ScopedComPtr<IAudioClient> audio_client;
  HRESULT hr = audio_device->Activate(__uuidof(IAudioClient),
                                      CLSCTX_INPROC_SERVER,
                                      NULL,
                                      audio_client.ReceiveVoid());
  DVLOG_IF(1, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr;
  return audio_client;
}
ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
    EDataFlow data_flow, ERole role) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role));
  return (default_device.get() ? CreateClient(default_device.get())
                               : ScopedComPtr<IAudioClient>());
}
ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
    const std::string& device_id, EDataFlow data_flow, ERole role) {
  if (device_id.empty())
    return CreateDefaultClient(data_flow, role);

  ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
  if (!device.get())
    return ScopedComPtr<IAudioClient>();
  return CreateClient(device.get());
}
HRESULT CoreAudioUtil::GetSharedModeMixFormat(
    IAudioClient* client, WAVEFORMATPCMEX* format) {
  DCHECK(IsSupported());
  ScopedCoMem<WAVEFORMATPCMEX> format_pcmex;
  HRESULT hr = client->GetMixFormat(
      reinterpret_cast<WAVEFORMATEX**>(&format_pcmex));
  if (FAILED(hr))
    return hr;

  size_t bytes = sizeof(WAVEFORMATEX) + format_pcmex->Format.cbSize;
  DCHECK_EQ(bytes, sizeof(WAVEFORMATPCMEX));

  memcpy(format, format_pcmex, bytes);
  return hr;
}
bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
                                      AUDCLNT_SHAREMODE share_mode,
                                      const WAVEFORMATPCMEX* format) {
  DCHECK(IsSupported());
  ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
  HRESULT hr = client->IsFormatSupported(
      share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
      reinterpret_cast<WAVEFORMATEX**>(&closest_match));

  // This log can only be triggered for shared mode.
  DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
                                << "but a closest match exists.";
  // This log can be triggered both for shared and exclusive modes.
  DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
  if (hr == S_FALSE) {
    DVLOG(2) << *closest_match;
  }

  return (hr == S_OK);
}
bool CoreAudioUtil::IsChannelLayoutSupported(const std::string& device_id,
                                             EDataFlow data_flow,
                                             ERole role,
                                             ChannelLayout channel_layout) {
  DCHECK(IsSupported());

  // First, get the preferred mixing format for shared mode streams.
  ScopedComPtr<IAudioClient> client(CreateClient(device_id, data_flow, role));
  if (!client.get())
    return false;

  WAVEFORMATPCMEX format;
  HRESULT hr = GetSharedModeMixFormat(client.get(), &format);
  if (FAILED(hr))
    return false;

  // Next, check if it is possible to use an alternative format where the
  // channel layout (and possibly number of channels) is modified.

  // Convert generic channel layout into Windows-specific channel configuration.
  ChannelConfig new_config = ChannelLayoutToChannelConfig(channel_layout);
  if (new_config == KSAUDIO_SPEAKER_UNSUPPORTED) {
    return false;
  }
  format.dwChannelMask = new_config;

  // Modify the format if the new channel layout has changed the number of
  // utilized channels.
  const int channels = ChannelLayoutToChannelCount(channel_layout);
  if (channels != format.Format.nChannels) {
    format.Format.nChannels = channels;
    format.Format.nBlockAlign = (format.Format.wBitsPerSample / 8) * channels;
    format.Format.nAvgBytesPerSec = format.Format.nSamplesPerSec *
                                    format.Format.nBlockAlign;
  }

  // Some devices can initialize a shared-mode stream with a format that is
  // not identical to the mix format obtained from the GetMixFormat() method.
  // However, the chances of succeeding increase if we use the same number of
  // channels and the same sample rate as the mix format. That is, this call
  // will return true only in those cases where the audio engine supports an
  // even wider range of shared-mode formats, e.g., when the installation
  // package for the audio device includes a local effects (LFX) audio
  // processing object (APO) that can handle format conversions.
  return CoreAudioUtil::IsFormatSupported(client.get(),
                                          AUDCLNT_SHAREMODE_SHARED,
                                          &format);
}
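// Usage sketch (the device id is assumed to name a valid render device; this
// is illustrative only and not copied from an actual call site):
//
//   if (CoreAudioUtil::IsChannelLayoutSupported(
//           device_id, eRender, eConsole, CHANNEL_LAYOUT_5_1)) {
//     // The audio engine (possibly helped by an installed APO) accepts a
//     // 5.1 shared-mode stream for this device.
//   }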
HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client,
                                       AUDCLNT_SHAREMODE share_mode,
                                       REFERENCE_TIME* device_period) {
  DCHECK(IsSupported());

  // Get the period of the engine thread.
  REFERENCE_TIME default_period = 0;
  REFERENCE_TIME minimum_period = 0;
  HRESULT hr = client->GetDevicePeriod(&default_period, &minimum_period);
  if (FAILED(hr))
    return hr;

  *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period :
      minimum_period;
  DVLOG(2) << "device_period: "
           << RefererenceTimeToTimeDelta(*device_period).InMillisecondsF()
           << " [ms]";
  return hr;
}
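// Worked example: with a shared-mode engine period of 100000 REFERENCE_TIME
// units (10 ms) and a 48000 Hz mix format, the derivation in
// GetPreferredAudioParameters() below yields
// 48000 * 0.01 + 0.5 ~= 480 audio frames per buffer.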
HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    IAudioClient* client, AudioParameters* params) {
  DCHECK(IsSupported());
  WAVEFORMATPCMEX mix_format;
  HRESULT hr = GetSharedModeMixFormat(client, &mix_format);
  if (FAILED(hr))
    return hr;

  REFERENCE_TIME default_period = 0;
  hr = GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED, &default_period);
  if (FAILED(hr))
    return hr;

  // Get the integer mask which corresponds to the channel layout the
  // audio engine uses for its internal processing/mixing of shared-mode
  // streams. This mask indicates which channels are present in the multi-
  // channel stream. The least significant bit corresponds with the Front Left
  // speaker, the next least significant bit corresponds to the Front Right
  // speaker, and so on, continuing in the order defined in KsMedia.h.
  // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083.aspx
  ChannelConfig channel_config = mix_format.dwChannelMask;

  // Convert Microsoft's channel configuration to generic ChannelLayout.
  ChannelLayout channel_layout = ChannelConfigToChannelLayout(channel_config);

  // Some devices don't appear to set a valid channel layout, so guess based on
  // the number of channels. See http://crbug.com/311906.
  if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) {
    DVLOG(1) << "Unsupported channel config: "
             << std::hex << channel_config
             << ". Guessing layout by channel count: "
             << std::dec << mix_format.Format.nChannels;
    channel_layout = GuessChannelLayout(mix_format.Format.nChannels);
  }

  // Preferred sample rate.
  int sample_rate = mix_format.Format.nSamplesPerSec;

  // TODO(henrika): possibly use format.Format.wBitsPerSample here instead.
  // We use a hard-coded value of 16 bits per sample today even if most audio
  // engines do the actual mixing in 32 bits per sample.
  int bits_per_sample = 16;

  // We are using the native device period to derive the smallest possible
  // buffer size in shared mode. Note that the actual endpoint buffer will be
  // larger than this size but it will be possible to fill it up in two calls.
  // TODO(henrika): ensure that this scheme works for capturing as well.
  int frames_per_buffer = static_cast<int>(sample_rate *
      RefererenceTimeToTimeDelta(default_period).InSecondsF() + 0.5);

  DVLOG(1) << "channel_layout   : " << channel_layout;
  DVLOG(1) << "sample_rate      : " << sample_rate;
  DVLOG(1) << "bits_per_sample  : " << bits_per_sample;
  DVLOG(1) << "frames_per_buffer: " << frames_per_buffer;

  AudioParameters audio_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
                               channel_layout,
                               sample_rate,
                               bits_per_sample,
                               frames_per_buffer);

  *params = audio_params;
  return hr;
}
HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    EDataFlow data_flow, ERole role, AudioParameters* params) {
  DCHECK(IsSupported());
  ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
  if (!client.get()) {
    // Map NULL-pointer to new error code which can be different from the
    // actual error code. The exact value is not important here.
    return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
  }

  HRESULT hr = GetPreferredAudioParameters(client.get(), params);
  if (FAILED(hr))
    return hr;

  if (role == eCommunications) {
    // Raise the 'DUCKING' flag for default communication devices.
    *params = AudioParameters(params->format(), params->channel_layout(),
        params->channels(), params->sample_rate(), params->bits_per_sample(),
        params->frames_per_buffer(),
        params->effects() | AudioParameters::DUCKING);
  }

  return hr;
}
HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    const std::string& device_id, AudioParameters* params) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
  if (!device.get()) {
    // Map NULL-pointer to new error code which can be different from the
    // actual error code. The exact value is not important here.
    return AUDCLNT_E_DEVICE_INVALIDATED;
  }

  ScopedComPtr<IAudioClient> client(CreateClient(device.get()));
  if (!client.get()) {
    // Map NULL-pointer to new error code which can be different from the
    // actual error code. The exact value is not important here.
    return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
  }
  return GetPreferredAudioParameters(client.get(), params);
}
ChannelConfig CoreAudioUtil::GetChannelConfig(const std::string& device_id,
                                              EDataFlow data_flow) {
  ScopedComPtr<IAudioClient> client(
      CreateClient(device_id, data_flow, eConsole));

  WAVEFORMATPCMEX format = {0};
  if (!client.get() || FAILED(GetSharedModeMixFormat(client.get(), &format)))
    return 0;

  return static_cast<ChannelConfig>(format.dwChannelMask);
}
HRESULT CoreAudioUtil::SharedModeInitialize(
    IAudioClient* client, const WAVEFORMATPCMEX* format, HANDLE event_handle,
    uint32* endpoint_buffer_size, const GUID* session_guid) {
  DCHECK(IsSupported());

  // Use default flags (i.e., don't set AUDCLNT_STREAMFLAGS_NOPERSIST) to
  // ensure that the volume level and muting state for a rendering session
  // are persistent across system restarts. The volume level and muting
  // state for a capture session are never persistent.
  DWORD stream_flags = 0;

  // Enable event-driven streaming if a valid event handle is provided.
  // After the stream starts, the audio engine will signal the event handle
  // to notify the client each time a buffer becomes ready to process.
  // Event-driven buffering is supported for both rendering and capturing.
  // Both shared-mode and exclusive-mode streams can use event-driven buffering.
  bool use_event = (event_handle != NULL &&
                    event_handle != INVALID_HANDLE_VALUE);
  if (use_event)
    stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
  DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;

  // Initialize the shared mode client for minimal delay.
  HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED,
                                  stream_flags,
                                  0,
                                  0,
                                  reinterpret_cast<const WAVEFORMATEX*>(format),
                                  session_guid);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::Initialize: " << std::hex << hr;
    return hr;
  }

  if (use_event) {
    hr = client->SetEventHandle(event_handle);
    if (FAILED(hr)) {
      DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
      return hr;
    }
  }

  UINT32 buffer_size_in_frames = 0;
  hr = client->GetBufferSize(&buffer_size_in_frames);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
    return hr;
  }

  *endpoint_buffer_size = buffer_size_in_frames;
  DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;

  // TODO(henrika): utilize when delay measurements are added.
  REFERENCE_TIME latency = 0;
  hr = client->GetStreamLatency(&latency);
  DVLOG(2) << "stream latency: "
           << RefererenceTimeToTimeDelta(latency).InMillisecondsF() << " [ms]";
  return hr;
}
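// Typical event-driven rendering setup built on the helpers in this file
// (sketch only, error handling omitted; the production sequence lives in the
// WASAPI output stream implementation):
//
//   ScopedComPtr<IAudioClient> client(
//       CoreAudioUtil::CreateDefaultClient(eRender, eConsole));
//   WAVEFORMATPCMEX format;
//   CoreAudioUtil::GetSharedModeMixFormat(client.get(), &format);
//   HANDLE event_handle = ::CreateEvent(NULL, FALSE, FALSE, NULL);
//   uint32 buffer_size = 0;
//   CoreAudioUtil::SharedModeInitialize(client.get(), &format, event_handle,
//                                       &buffer_size, NULL);
//   ScopedComPtr<IAudioRenderClient> render_client(
//       CoreAudioUtil::CreateRenderClient(client.get()));
//   CoreAudioUtil::FillRenderEndpointBufferWithSilence(client.get(),
//                                                      render_client.get());
//   client->Start();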
ScopedComPtr<IAudioRenderClient> CoreAudioUtil::CreateRenderClient(
    IAudioClient* client) {
  DCHECK(IsSupported());

  // Get access to the IAudioRenderClient interface. This interface
  // enables us to write output data to a rendering endpoint buffer.
  ScopedComPtr<IAudioRenderClient> audio_render_client;
  HRESULT hr = client->GetService(__uuidof(IAudioRenderClient),
                                  audio_render_client.ReceiveVoid());
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
    return ScopedComPtr<IAudioRenderClient>();
  }
  return audio_render_client;
}
ScopedComPtr<IAudioCaptureClient> CoreAudioUtil::CreateCaptureClient(
    IAudioClient* client) {
  DCHECK(IsSupported());

  // Get access to the IAudioCaptureClient interface. This interface
  // enables us to read input data from a capturing endpoint buffer.
  ScopedComPtr<IAudioCaptureClient> audio_capture_client;
  HRESULT hr = client->GetService(__uuidof(IAudioCaptureClient),
                                  audio_capture_client.ReceiveVoid());
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
    return ScopedComPtr<IAudioCaptureClient>();
  }
  return audio_capture_client;
}
bool CoreAudioUtil::FillRenderEndpointBufferWithSilence(
    IAudioClient* client, IAudioRenderClient* render_client) {
  DCHECK(IsSupported());

  UINT32 endpoint_buffer_size = 0;
  if (FAILED(client->GetBufferSize(&endpoint_buffer_size)))
    return false;

  UINT32 num_queued_frames = 0;
  if (FAILED(client->GetCurrentPadding(&num_queued_frames)))
    return false;

  BYTE* data = NULL;
  int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
  if (FAILED(render_client->GetBuffer(num_frames_to_fill, &data)))
    return false;

  // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
  // explicitly write silence data to the rendering buffer.
  DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence";
  return SUCCEEDED(render_client->ReleaseBuffer(num_frames_to_fill,
                                                AUDCLNT_BUFFERFLAGS_SILENT));
}

}  // namespace media