media/audio/win/core_audio_util_win.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/core_audio_util_win.h"

#include <audioclient.h>
#include <devicetopology.h>
#include <functiondiscoverykeys_devpkey.h>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_handle.h"
#include "base/win/scoped_propvariant.h"
#include "base/win/windows_version.h"
#include "media/base/media_switches.h"

using base::win::ScopedCoMem;
using base::win::ScopedHandle;

namespace media {

enum { KSAUDIO_SPEAKER_UNSUPPORTED = 0 };

// Converts Microsoft's channel configuration to ChannelLayout.
// This mapping is not perfect but the best we can do given the current
// ChannelLayout enumerator and the Windows-specific speaker configurations
// defined in ksmedia.h. Don't assume that the channel ordering in
// ChannelLayout is exactly the same as the Windows specific configuration.
// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
// speakers are different in these two definitions.
static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
  switch (config) {
    case KSAUDIO_SPEAKER_DIRECTOUT:
      DVLOG(2) << "KSAUDIO_SPEAKER_DIRECTOUT=>CHANNEL_LAYOUT_NONE";
      return CHANNEL_LAYOUT_NONE;
    case KSAUDIO_SPEAKER_MONO:
      DVLOG(2) << "KSAUDIO_SPEAKER_MONO=>CHANNEL_LAYOUT_MONO";
      return CHANNEL_LAYOUT_MONO;
    case KSAUDIO_SPEAKER_STEREO:
      DVLOG(2) << "KSAUDIO_SPEAKER_STEREO=>CHANNEL_LAYOUT_STEREO";
      return CHANNEL_LAYOUT_STEREO;
    case KSAUDIO_SPEAKER_QUAD:
      DVLOG(2) << "KSAUDIO_SPEAKER_QUAD=>CHANNEL_LAYOUT_QUAD";
      return CHANNEL_LAYOUT_QUAD;
    case KSAUDIO_SPEAKER_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_SURROUND=>CHANNEL_LAYOUT_4_0";
      return CHANNEL_LAYOUT_4_0;
    case KSAUDIO_SPEAKER_5POINT1:
      DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1=>CHANNEL_LAYOUT_5_1_BACK";
      return CHANNEL_LAYOUT_5_1_BACK;
    case KSAUDIO_SPEAKER_5POINT1_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1_SURROUND=>CHANNEL_LAYOUT_5_1";
      return CHANNEL_LAYOUT_5_1;
    case KSAUDIO_SPEAKER_7POINT1:
      DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1=>CHANNEL_LAYOUT_7_1_WIDE";
      return CHANNEL_LAYOUT_7_1_WIDE;
    case KSAUDIO_SPEAKER_7POINT1_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1_SURROUND=>CHANNEL_LAYOUT_7_1";
      return CHANNEL_LAYOUT_7_1;
    default:
      DVLOG(2) << "Unsupported channel configuration: " << config;
      return CHANNEL_LAYOUT_UNSUPPORTED;
  }
}

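// Example of the mapping above (an illustrative sketch, not called anywhere
// in this file): a typical 5.1 device reports the speaker mask
// KSAUDIO_SPEAKER_5POINT1_SURROUND (front L/R, center, LFE, side L/R), which
// maps to CHANNEL_LAYOUT_5_1, while the older back-speaker variant
// KSAUDIO_SPEAKER_5POINT1 maps to CHANNEL_LAYOUT_5_1_BACK:
//
//   DCHECK_EQ(CHANNEL_LAYOUT_5_1,
//             ChannelConfigToChannelLayout(KSAUDIO_SPEAKER_5POINT1_SURROUND));
//   DCHECK_EQ(CHANNEL_LAYOUT_5_1_BACK,
//             ChannelConfigToChannelLayout(KSAUDIO_SPEAKER_5POINT1));
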
// TODO(henrika): add mapping for all types in the ChannelLayout enumerator.
static ChannelConfig ChannelLayoutToChannelConfig(ChannelLayout layout) {
  switch (layout) {
    case CHANNEL_LAYOUT_NONE:
      DVLOG(2) << "CHANNEL_LAYOUT_NONE=>KSAUDIO_SPEAKER_UNSUPPORTED";
      return KSAUDIO_SPEAKER_UNSUPPORTED;
    case CHANNEL_LAYOUT_UNSUPPORTED:
      DVLOG(2) << "CHANNEL_LAYOUT_UNSUPPORTED=>KSAUDIO_SPEAKER_UNSUPPORTED";
      return KSAUDIO_SPEAKER_UNSUPPORTED;
    case CHANNEL_LAYOUT_MONO:
      DVLOG(2) << "CHANNEL_LAYOUT_MONO=>KSAUDIO_SPEAKER_MONO";
      return KSAUDIO_SPEAKER_MONO;
    case CHANNEL_LAYOUT_STEREO:
      DVLOG(2) << "CHANNEL_LAYOUT_STEREO=>KSAUDIO_SPEAKER_STEREO";
      return KSAUDIO_SPEAKER_STEREO;
    case CHANNEL_LAYOUT_QUAD:
      DVLOG(2) << "CHANNEL_LAYOUT_QUAD=>KSAUDIO_SPEAKER_QUAD";
      return KSAUDIO_SPEAKER_QUAD;
    case CHANNEL_LAYOUT_4_0:
      DVLOG(2) << "CHANNEL_LAYOUT_4_0=>KSAUDIO_SPEAKER_SURROUND";
      return KSAUDIO_SPEAKER_SURROUND;
    case CHANNEL_LAYOUT_5_1_BACK:
      DVLOG(2) << "CHANNEL_LAYOUT_5_1_BACK=>KSAUDIO_SPEAKER_5POINT1";
      return KSAUDIO_SPEAKER_5POINT1;
    case CHANNEL_LAYOUT_5_1:
      DVLOG(2) << "CHANNEL_LAYOUT_5_1=>KSAUDIO_SPEAKER_5POINT1_SURROUND";
      return KSAUDIO_SPEAKER_5POINT1_SURROUND;
    case CHANNEL_LAYOUT_7_1_WIDE:
      DVLOG(2) << "CHANNEL_LAYOUT_7_1_WIDE=>KSAUDIO_SPEAKER_7POINT1";
      return KSAUDIO_SPEAKER_7POINT1;
    case CHANNEL_LAYOUT_7_1:
      DVLOG(2) << "CHANNEL_LAYOUT_7_1=>KSAUDIO_SPEAKER_7POINT1_SURROUND";
      return KSAUDIO_SPEAKER_7POINT1_SURROUND;
    default:
      DVLOG(2) << "Unsupported channel layout: " << layout;
      return KSAUDIO_SPEAKER_UNSUPPORTED;
  }
}

static std::ostream& operator<<(std::ostream& os,
                                const WAVEFORMATPCMEX& format) {
  os << "wFormatTag: 0x" << std::hex << format.Format.wFormatTag
     << ", nChannels: " << std::dec << format.Format.nChannels
     << ", nSamplesPerSec: " << format.Format.nSamplesPerSec
     << ", nAvgBytesPerSec: " << format.Format.nAvgBytesPerSec
     << ", nBlockAlign: " << format.Format.nBlockAlign
     << ", wBitsPerSample: " << format.Format.wBitsPerSample
     << ", cbSize: " << format.Format.cbSize
     << ", wValidBitsPerSample: " << format.Samples.wValidBitsPerSample
     << ", dwChannelMask: 0x" << std::hex << format.dwChannelMask;
  return os;
}

static bool LoadAudiosesDll() {
  static const wchar_t* const kAudiosesDLL =
      L"%WINDIR%\\system32\\audioses.dll";

  wchar_t path[MAX_PATH] = {0};
  ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
  return (LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH) != NULL);
}

static bool CanCreateDeviceEnumerator() {
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
  HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                                NULL, CLSCTX_INPROC_SERVER);

  // If we hit CO_E_NOTINITIALIZED, CoInitialize has not been called and it
  // must be called at least once for each thread that uses the COM library.
  CHECK_NE(hr, CO_E_NOTINITIALIZED);

  return SUCCEEDED(hr);
}

static std::string GetDeviceID(IMMDevice* device) {
  ScopedCoMem<WCHAR> device_id_com;
  std::string device_id;
  if (SUCCEEDED(device->GetId(&device_id_com)))
    WideToUTF8(device_id_com, wcslen(device_id_com), &device_id);
  return device_id;
}

bool CoreAudioUtil::IsSupported() {
  // It is possible to force usage of WaveXxx APIs by using a command line flag.
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kForceWaveAudio)) {
    LOG(WARNING) << "Forcing usage of Windows WaveXxx APIs";
    return false;
  }

  // Microsoft does not plan to make the Core Audio APIs available for use
  // with earlier versions of Windows, including Microsoft Windows Server 2003,
  // Windows XP, Windows Millennium Edition, Windows 2000, and Windows 98.
  if (base::win::GetVersion() < base::win::VERSION_VISTA)
    return false;

  // The audio core APIs are implemented in the Mmdevapi.dll and Audioses.dll
  // system components.
  // Dependency Walker shows that it is enough to verify that the Audioses DLL
  // can be loaded since it depends on Mmdevapi.dll.
  // See http://crbug.com/166397 why this extra step is required to guarantee
  // Core Audio support.
  static bool g_audioses_dll_available = LoadAudiosesDll();
  if (!g_audioses_dll_available)
    return false;

  // Being able to load the Audioses.dll does not seem to be sufficient for
  // all devices to guarantee Core Audio support. To be 100% sure, we also
  // verify that it is possible to create the IMMDeviceEnumerator interface.
  // If this works as well we should be home free.
  static bool g_can_create_device_enumerator = CanCreateDeviceEnumerator();
  LOG_IF(ERROR, !g_can_create_device_enumerator)
      << "Failed to create Core Audio device enumerator on thread with ID "
      << GetCurrentThreadId();
  return g_can_create_device_enumerator;
}

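// A minimal sketch of the expected calling pattern (illustrative only; the
// use of base::win::ScopedCOMInitializer is an assumption based on how COM
// is typically initialized elsewhere in Chromium, see the CO_E_NOTINITIALIZED
// note in CanCreateDeviceEnumerator() above):
//
//   base::win::ScopedCOMInitializer com_initializer;
//   if (!CoreAudioUtil::IsSupported())
//     return;  // Fall back to the WaveXxx audio path.
//   ScopedComPtr<IMMDevice> device(
//       CoreAudioUtil::CreateDefaultDevice(eRender, eConsole));
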
base::TimeDelta CoreAudioUtil::RefererenceTimeToTimeDelta(REFERENCE_TIME time) {
  // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
  return base::TimeDelta::FromMicroseconds(0.1 * time + 0.5);
}

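// Worked example of the conversion above: a REFERENCE_TIME of 100,000 units
// (100,000 x 100 ns = 10 ms) yields 0.1 * 100,000 + 0.5 = 10,000.5, which is
// truncated to 10,000 microseconds, i.e. a base::TimeDelta of 10 ms.
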
AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() {
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
    return AUDCLNT_SHAREMODE_EXCLUSIVE;
  return AUDCLNT_SHAREMODE_SHARED;
}

int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
  DCHECK(IsSupported());
  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator)
    return 0;

  // Generate a collection of active (present and not disabled) audio endpoint
  // devices for the specified data-flow direction.
  // This method will succeed even if all devices are disabled.
  ScopedComPtr<IMMDeviceCollection> collection;
  HRESULT hr = device_enumerator->EnumAudioEndpoints(data_flow,
                                                     DEVICE_STATE_ACTIVE,
                                                     collection.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "IMMDeviceCollection::EnumAudioEndpoints: " << std::hex << hr;
    return 0;
  }

  // Retrieve the number of active audio devices for the specified direction.
  UINT number_of_active_devices = 0;
  collection->GetCount(&number_of_active_devices);
  DVLOG(2) << ((data_flow == eCapture) ? "[in ] " : "[out] ")
           << "number of devices: " << number_of_active_devices;
  return static_cast<int>(number_of_active_devices);
}

ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
  HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                                NULL, CLSCTX_INPROC_SERVER);
  CHECK(SUCCEEDED(hr));
  return device_enumerator;
}

ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
                                                           ERole role) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> endpoint_device;

  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator)
    return endpoint_device;

  // Retrieve the default audio endpoint for the specified data-flow
  // direction and role.
  HRESULT hr = device_enumerator->GetDefaultAudioEndpoint(
      data_flow, role, endpoint_device.Receive());

  if (FAILED(hr)) {
    DVLOG(1) << "IMMDeviceEnumerator::GetDefaultAudioEndpoint: "
             << std::hex << hr;
    return endpoint_device;
  }

  // Verify that the audio endpoint device is active, i.e., that the audio
  // adapter that connects to the endpoint device is present and enabled.
  DWORD state = DEVICE_STATE_DISABLED;
  hr = endpoint_device->GetState(&state);
  if (SUCCEEDED(hr)) {
    if (!(state & DEVICE_STATE_ACTIVE)) {
      DVLOG(1) << "Selected endpoint device is not active";
      endpoint_device.Release();
    }
  }
  return endpoint_device;
}

std::string CoreAudioUtil::GetDefaultOutputDeviceID() {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device(CreateDefaultDevice(eRender, eConsole));
  return device ? GetDeviceID(device) : std::string();
}

ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
    const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> endpoint_device;

  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator)
    return endpoint_device;

  // Retrieve an audio device specified by an endpoint device-identification
  // string.
  HRESULT hr = device_enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
                                            endpoint_device.Receive());
  DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: "
                          << std::hex << hr;
  return endpoint_device;
}

HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
  DCHECK(IsSupported());

  // Retrieve unique name of endpoint device.
  // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
  AudioDeviceName device_name;
  device_name.unique_id = GetDeviceID(device);
  if (device_name.unique_id.empty())
    return E_FAIL;

  // Retrieve user-friendly name of endpoint device.
  // Example: "Microphone (Realtek High Definition Audio)".
  ScopedComPtr<IPropertyStore> properties;
  HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
  if (FAILED(hr))
    return hr;
  base::win::ScopedPropVariant friendly_name;
  hr = properties->GetValue(PKEY_Device_FriendlyName, friendly_name.Receive());
  if (FAILED(hr))
    return hr;
  if (friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
    WideToUTF8(friendly_name.get().pwszVal,
               wcslen(friendly_name.get().pwszVal),
               &device_name.device_name);
  }

  *name = device_name;
  DVLOG(2) << "friendly name: " << device_name.device_name;
  DVLOG(2) << "unique id    : " << device_name.unique_id;
  return hr;
}

std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
    IMMDeviceEnumerator* enumerator) {
  DCHECK(IsSupported());

  // Fetching the controller device id could be as simple as fetching the value
  // of the "{B3F8FA53-0004-438E-9003-51A46E139BFC},2" property in the property
  // store of the |device|, but that key isn't defined in any header and
  // according to MS should not be relied upon.
  // So, instead, we go deeper, look at the device topology and fetch the
  // PKEY_Device_InstanceId of the associated physical audio device.
  ScopedComPtr<IDeviceTopology> topology;
  ScopedComPtr<IConnector> connector;
  ScopedCoMem<WCHAR> filter_id;
  if (FAILED(device->Activate(__uuidof(IDeviceTopology), CLSCTX_ALL, NULL,
                              topology.ReceiveVoid())) ||
      // For our purposes checking the first connected device should be enough
      // and if there are cases where there are more than one device connected
      // we're not sure how to handle that anyway. So we pass 0.
      FAILED(topology->GetConnector(0, connector.Receive())) ||
      FAILED(connector->GetDeviceIdConnectedTo(&filter_id))) {
    DLOG(ERROR) << "Failed to get the device identifier of the audio device";
    return std::string();
  }

  // Now look at the properties of the connected device node and fetch the
  // instance id (PKEY_Device_InstanceId) of the device node that uniquely
  // identifies the controller.
  ScopedComPtr<IMMDevice> device_node;
  ScopedComPtr<IPropertyStore> properties;
  base::win::ScopedPropVariant instance_id;
  if (FAILED(enumerator->GetDevice(filter_id, device_node.Receive())) ||
      FAILED(device_node->OpenPropertyStore(STGM_READ, properties.Receive())) ||
      FAILED(properties->GetValue(PKEY_Device_InstanceId,
                                  instance_id.Receive())) ||
      instance_id.get().vt != VT_LPWSTR) {
    DLOG(ERROR) << "Failed to get instance id of the audio device node";
    return std::string();
  }

  std::string controller_id;
  WideToUTF8(instance_id.get().pwszVal,
             wcslen(instance_id.get().pwszVal),
             &controller_id);

  return controller_id;
}

std::string CoreAudioUtil::GetMatchingOutputDeviceID(
    const std::string& input_device_id) {
  ScopedComPtr<IMMDevice> input_device(CreateDevice(input_device_id));
  if (!input_device)
    return std::string();

  // See if we can get the id of the associated controller.
  ScopedComPtr<IMMDeviceEnumerator> enumerator(CreateDeviceEnumerator());
  std::string controller_id(GetAudioControllerID(input_device, enumerator));
  if (controller_id.empty())
    return std::string();

  // Now enumerate the available (and active) output devices and see if any of
  // them is associated with the same controller.
  ScopedComPtr<IMMDeviceCollection> collection;
  enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE,
                                 collection.Receive());
  if (!collection)
    return std::string();

  UINT count = 0;
  collection->GetCount(&count);
  ScopedComPtr<IMMDevice> output_device;
  for (UINT i = 0; i < count; ++i) {
    collection->Item(i, output_device.Receive());
    std::string output_controller_id(GetAudioControllerID(
        output_device, enumerator));
    if (output_controller_id == controller_id)
      break;
    output_device = NULL;
  }

  return output_device ? GetDeviceID(output_device) : std::string();
}

std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
  if (!audio_device)
    return std::string();

  AudioDeviceName device_name;
  HRESULT hr = GetDeviceName(audio_device, &device_name);
  if (FAILED(hr))
    return std::string();

  return device_name.device_name;
}

bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
                                    ERole role,
                                    const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device = CreateDefaultDevice(flow, role);
  if (!device)
    return false;

  std::string str_default(GetDeviceID(device));
  return device_id.compare(str_default) == 0;
}

EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMEndpoint> endpoint;
  HRESULT hr = device->QueryInterface(endpoint.Receive());
  if (FAILED(hr)) {
    DVLOG(1) << "IMMDevice::QueryInterface: " << std::hex << hr;
    return eAll;
  }

  EDataFlow data_flow;
  hr = endpoint->GetDataFlow(&data_flow);
  if (FAILED(hr)) {
    DVLOG(1) << "IMMEndpoint::GetDataFlow: " << std::hex << hr;
    return eAll;
  }
  return data_flow;
}

ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
    IMMDevice* audio_device) {
  DCHECK(IsSupported());

  // Creates and activates an IAudioClient COM object given the selected
  // endpoint device.
  ScopedComPtr<IAudioClient> audio_client;
  HRESULT hr = audio_device->Activate(__uuidof(IAudioClient),
                                      CLSCTX_INPROC_SERVER,
                                      NULL,
                                      audio_client.ReceiveVoid());
  DVLOG_IF(1, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr;
  return audio_client;
}

ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
    EDataFlow data_flow, ERole role) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role));
  return (default_device ? CreateClient(default_device) :
          ScopedComPtr<IAudioClient>());
}

ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
    const std::string& device_id, EDataFlow data_flow, ERole role) {
  if (device_id.empty())
    return CreateDefaultClient(data_flow, role);

  ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
  if (!device)
    return ScopedComPtr<IAudioClient>();

  return CreateClient(device);
}

HRESULT CoreAudioUtil::GetSharedModeMixFormat(
    IAudioClient* client, WAVEFORMATPCMEX* format) {
  DCHECK(IsSupported());
  ScopedCoMem<WAVEFORMATPCMEX> format_pcmex;
  HRESULT hr = client->GetMixFormat(
      reinterpret_cast<WAVEFORMATEX**>(&format_pcmex));
  if (FAILED(hr))
    return hr;

  size_t bytes = sizeof(WAVEFORMATEX) + format_pcmex->Format.cbSize;
  DCHECK_EQ(bytes, sizeof(WAVEFORMATPCMEX));

  memcpy(format, format_pcmex, bytes);
  DVLOG(2) << *format;

  return hr;
}

bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
                                      AUDCLNT_SHAREMODE share_mode,
                                      const WAVEFORMATPCMEX* format) {
  DCHECK(IsSupported());
  ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
  HRESULT hr = client->IsFormatSupported(
      share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
      reinterpret_cast<WAVEFORMATEX**>(&closest_match));

  // This log can only be triggered for shared mode.
  DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
                                << "but a closest match exists.";
  // This log can be triggered both for shared and exclusive modes.
  DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
  if (hr == S_FALSE) {
    DVLOG(2) << *closest_match;
  }

  return (hr == S_OK);
}

bool CoreAudioUtil::IsChannelLayoutSupported(const std::string& device_id,
                                             EDataFlow data_flow,
                                             ERole role,
                                             ChannelLayout channel_layout) {
  DCHECK(IsSupported());

  // First, get the preferred mixing format for shared mode streams.

  ScopedComPtr<IAudioClient> client(CreateClient(device_id, data_flow, role));
  if (!client)
    return false;

  WAVEFORMATPCMEX format;
  HRESULT hr = GetSharedModeMixFormat(client, &format);
  if (FAILED(hr))
    return false;

  // Next, check if it is possible to use an alternative format where the
  // channel layout (and possibly number of channels) is modified.

  // Convert generic channel layout into Windows-specific channel configuration.
  ChannelConfig new_config = ChannelLayoutToChannelConfig(channel_layout);
  if (new_config == KSAUDIO_SPEAKER_UNSUPPORTED) {
    return false;
  }
  format.dwChannelMask = new_config;

  // Modify the format if the new channel layout has changed the number of
  // utilized channels.
  const int channels = ChannelLayoutToChannelCount(channel_layout);
  if (channels != format.Format.nChannels) {
    format.Format.nChannels = channels;
    format.Format.nBlockAlign = (format.Format.wBitsPerSample / 8) * channels;
    format.Format.nAvgBytesPerSec = format.Format.nSamplesPerSec *
                                    format.Format.nBlockAlign;
  }
  DVLOG(2) << format;

  // Some devices can initialize a shared-mode stream with a format that is
  // not identical to the mix format obtained from the GetMixFormat() method.
  // However, the chances of succeeding increase if we use the same number of
  // channels and the same sample rate as the mix format. I.e., this call will
  // return true only in those cases where the audio engine is able to support
  // an even wider range of shared-mode formats where the installation package
  // for the audio device includes a local effects (LFX) audio processing
  // object (APO) that can handle format conversions.
  return CoreAudioUtil::IsFormatSupported(client, AUDCLNT_SHAREMODE_SHARED,
                                          &format);
}

HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client,
                                       AUDCLNT_SHAREMODE share_mode,
                                       REFERENCE_TIME* device_period) {
  DCHECK(IsSupported());

  // Get the period of the engine thread.
  REFERENCE_TIME default_period = 0;
  REFERENCE_TIME minimum_period = 0;
  HRESULT hr = client->GetDevicePeriod(&default_period, &minimum_period);
  if (FAILED(hr))
    return hr;

  *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period :
      minimum_period;
  DVLOG(2) << "device_period: "
           << RefererenceTimeToTimeDelta(*device_period).InMillisecondsF()
           << " [ms]";
  return hr;
}

HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    IAudioClient* client, AudioParameters* params) {
  DCHECK(IsSupported());
  WAVEFORMATPCMEX mix_format;
  HRESULT hr = GetSharedModeMixFormat(client, &mix_format);
  if (FAILED(hr))
    return hr;

  REFERENCE_TIME default_period = 0;
  hr = GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED, &default_period);
  if (FAILED(hr))
    return hr;

  // Get the integer mask which corresponds to the channel layout the
  // audio engine uses for its internal processing/mixing of shared-mode
  // streams. This mask indicates which channels are present in the multi-
  // channel stream. The least significant bit corresponds with the Front Left
  // speaker, the next least significant bit corresponds to the Front Right
  // speaker, and so on, continuing in the order defined in KsMedia.h.
  // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083.aspx
  // for more details.
  ChannelConfig channel_config = mix_format.dwChannelMask;

  // Convert Microsoft's channel configuration to generic ChannelLayout.
  ChannelLayout channel_layout = ChannelConfigToChannelLayout(channel_config);

  // Preferred sample rate.
  int sample_rate = mix_format.Format.nSamplesPerSec;

  // TODO(henrika): possibly use format.Format.wBitsPerSample here instead.
  // We use a hard-coded value of 16 bits per sample today even if most audio
  // engines do the actual mixing in 32 bits per sample.
  int bits_per_sample = 16;

  // We are using the native device period to derive the smallest possible
  // buffer size in shared mode. Note that the actual endpoint buffer will be
  // larger than this size but it will be possible to fill it up in two calls.
  // TODO(henrika): ensure that this scheme works for capturing as well.
  int frames_per_buffer = static_cast<int>(sample_rate *
      RefererenceTimeToTimeDelta(default_period).InSecondsF() + 0.5);

  DVLOG(1) << "channel_layout   : " << channel_layout;
  DVLOG(1) << "sample_rate      : " << sample_rate;
  DVLOG(1) << "bits_per_sample  : " << bits_per_sample;
  DVLOG(1) << "frames_per_buffer: " << frames_per_buffer;

  AudioParameters audio_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
                               channel_layout,
                               sample_rate,
                               bits_per_sample,
                               frames_per_buffer);

  *params = audio_params;
  return hr;
}

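// Worked example of the frames_per_buffer derivation above: with a typical
// shared-mode mix rate of 48000 Hz and a default device period of 10 ms
// (100,000 REFERENCE_TIME units), frames_per_buffer becomes
// 48000 * 0.01 + 0.5 = 480.5, truncated to 480 audio frames per buffer.
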
HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    EDataFlow data_flow, ERole role, AudioParameters* params) {
  DCHECK(IsSupported());
  ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
  if (!client) {
    // Map NULL-pointer to new error code which can be different from the
    // actual error code. The exact value is not important here.
    return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
  }
  return GetPreferredAudioParameters(client, params);
}

HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    const std::string& device_id, AudioParameters* params) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
  if (!device) {
    // Map NULL-pointer to new error code which can be different from the
    // actual error code. The exact value is not important here.
    return AUDCLNT_E_DEVICE_INVALIDATED;
  }

  ScopedComPtr<IAudioClient> client(CreateClient(device));
  if (!client) {
    // Map NULL-pointer to new error code which can be different from the
    // actual error code. The exact value is not important here.
    return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
  }
  return GetPreferredAudioParameters(client, params);
}

ChannelConfig CoreAudioUtil::GetChannelConfig(const std::string& device_id,
                                              EDataFlow data_flow) {
  ScopedComPtr<IAudioClient> client(
      CreateClient(device_id, data_flow, eConsole));

  WAVEFORMATPCMEX format = {0};
  if (!client || FAILED(GetSharedModeMixFormat(client, &format)))
    return 0;

  return static_cast<ChannelConfig>(format.dwChannelMask);
}

HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
                                            const WAVEFORMATPCMEX* format,
                                            HANDLE event_handle,
                                            uint32* endpoint_buffer_size) {
  DCHECK(IsSupported());

  // Use default flags (i.e., don't set AUDCLNT_STREAMFLAGS_NOPERSIST) to
  // ensure that the volume level and muting state for a rendering session
  // are persistent across system restarts. The volume level and muting
  // state for a capture session are never persistent.
  DWORD stream_flags = 0;

  // Enable event-driven streaming if a valid event handle is provided.
  // After the stream starts, the audio engine will signal the event handle
  // to notify the client each time a buffer becomes ready to process.
  // Event-driven buffering is supported for both rendering and capturing.
  // Both shared-mode and exclusive-mode streams can use event-driven buffering.
  bool use_event = (event_handle != NULL &&
                    event_handle != INVALID_HANDLE_VALUE);
  if (use_event)
    stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
  DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;

  // Initialize the shared mode client for minimal delay.
  HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED,
                                  stream_flags,
                                  0,
                                  0,
                                  reinterpret_cast<const WAVEFORMATEX*>(format),
                                  NULL);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::Initialize: " << std::hex << hr;
    return hr;
  }

  if (use_event) {
    hr = client->SetEventHandle(event_handle);
    if (FAILED(hr)) {
      DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
      return hr;
    }
  }

  UINT32 buffer_size_in_frames = 0;
  hr = client->GetBufferSize(&buffer_size_in_frames);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
    return hr;
  }

  *endpoint_buffer_size = buffer_size_in_frames;
  DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;

  // TODO(henrika): utilize when delay measurements are added.
  REFERENCE_TIME latency = 0;
  hr = client->GetStreamLatency(&latency);
  DVLOG(2) << "stream latency: "
           << RefererenceTimeToTimeDelta(latency).InMillisecondsF() << " [ms]";
  return hr;
}

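// A minimal sketch of how the helpers in this file are typically combined to
// set up an event-driven, shared-mode render stream (illustrative only, with
// error handling omitted; the exact sequence used by Chromium's audio streams
// may differ):
//
//   ScopedComPtr<IAudioClient> client(
//       CoreAudioUtil::CreateDefaultClient(eRender, eConsole));
//   WAVEFORMATPCMEX format;
//   CoreAudioUtil::GetSharedModeMixFormat(client, &format);
//   HANDLE event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
//   uint32 endpoint_buffer_size = 0;
//   CoreAudioUtil::SharedModeInitialize(client, &format, event_handle,
//                                       &endpoint_buffer_size);
//   ScopedComPtr<IAudioRenderClient> render_client(
//       CoreAudioUtil::CreateRenderClient(client));
//   CoreAudioUtil::FillRenderEndpointBufferWithSilence(client, render_client);
//   client->Start();
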
ScopedComPtr<IAudioRenderClient> CoreAudioUtil::CreateRenderClient(
    IAudioClient* client) {
  DCHECK(IsSupported());

  // Get access to the IAudioRenderClient interface. This interface
  // enables us to write output data to a rendering endpoint buffer.
  ScopedComPtr<IAudioRenderClient> audio_render_client;
  HRESULT hr = client->GetService(__uuidof(IAudioRenderClient),
                                  audio_render_client.ReceiveVoid());
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
    return ScopedComPtr<IAudioRenderClient>();
  }
  return audio_render_client;
}

ScopedComPtr<IAudioCaptureClient> CoreAudioUtil::CreateCaptureClient(
    IAudioClient* client) {
  DCHECK(IsSupported());

  // Get access to the IAudioCaptureClient interface. This interface
  // enables us to read input data from a capturing endpoint buffer.
  ScopedComPtr<IAudioCaptureClient> audio_capture_client;
  HRESULT hr = client->GetService(__uuidof(IAudioCaptureClient),
                                  audio_capture_client.ReceiveVoid());
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
    return ScopedComPtr<IAudioCaptureClient>();
  }
  return audio_capture_client;
}

bool CoreAudioUtil::FillRenderEndpointBufferWithSilence(
    IAudioClient* client, IAudioRenderClient* render_client) {
  DCHECK(IsSupported());

  UINT32 endpoint_buffer_size = 0;
  if (FAILED(client->GetBufferSize(&endpoint_buffer_size)))
    return false;

  UINT32 num_queued_frames = 0;
  if (FAILED(client->GetCurrentPadding(&num_queued_frames)))
    return false;

  BYTE* data = NULL;
  int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
  if (FAILED(render_client->GetBuffer(num_frames_to_fill, &data)))
    return false;

  // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
  // explicitly write silence data to the rendering buffer.
  DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence";
  return SUCCEEDED(render_client->ReleaseBuffer(num_frames_to_fill,
                                                AUDCLNT_BUFFERFLAGS_SILENT));
}

}  // namespace media