Apply room rolloff factors even when Auxiliary Send Auto is off
[openal-soft.git] / core / voice.cpp
blob 5384521593cae95d7bf428dddb24569be4e97af1
2 #include "config.h"
4 #include "voice.h"
6 #include <algorithm>
7 #include <array>
8 #include <atomic>
9 #include <cassert>
10 #include <climits>
11 #include <cstdint>
12 #include <cstdlib>
13 #include <iterator>
14 #include <memory>
15 #include <new>
16 #include <optional>
17 #include <utility>
18 #include <vector>
20 #include "alnumeric.h"
21 #include "alspan.h"
22 #include "alstring.h"
23 #include "ambidefs.h"
24 #include "async_event.h"
25 #include "buffer_storage.h"
26 #include "context.h"
27 #include "cpu_caps.h"
28 #include "devformat.h"
29 #include "device.h"
30 #include "filters/biquad.h"
31 #include "filters/nfc.h"
32 #include "filters/splitter.h"
33 #include "fmt_traits.h"
34 #include "logging.h"
35 #include "mixer.h"
36 #include "mixer/defs.h"
37 #include "mixer/hrtfdefs.h"
38 #include "opthelpers.h"
39 #include "resampler_limits.h"
40 #include "ringbuffer.h"
41 #include "vector.h"
42 #include "voice_change.h"
44 struct CTag;
45 #ifdef HAVE_SSE
46 struct SSETag;
47 #endif
48 #ifdef HAVE_NEON
49 struct NEONTag;
50 #endif
53 static_assert(!(DeviceBase::MixerLineSize&3), "MixerLineSize must be a multiple of 4");
54 static_assert(!(MaxResamplerEdge&3), "MaxResamplerEdge is not a multiple of 4");
56 static_assert((BufferLineSize-1)/MaxPitch > 0, "MaxPitch is too large for BufferLineSize!");
57 static_assert((INT_MAX>>MixerFracBits)/MaxPitch > BufferLineSize,
58 "MaxPitch and/or BufferLineSize are too large for MixerFracBits!");
60 namespace {
62 using uint = unsigned int;
63 using namespace std::chrono;
64 using namespace std::string_view_literals;
66 using HrtfMixerFunc = void(*)(const al::span<const float> InSamples,
67 const al::span<float2> AccumSamples, const uint IrSize, const MixHrtfFilter *hrtfparams,
68 const size_t SamplesToDo);
69 using HrtfMixerBlendFunc = void(*)(const al::span<const float> InSamples,
70 const al::span<float2> AccumSamples, const uint IrSize, const HrtfFilter *oldparams,
71 const MixHrtfFilter *newparams, const size_t SamplesToDo);
73 HrtfMixerFunc MixHrtfSamples{MixHrtf_<CTag>};
74 HrtfMixerBlendFunc MixHrtfBlendSamples{MixHrtfBlend_<CTag>};
76 inline MixerOutFunc SelectMixer()
78 #ifdef HAVE_NEON
79 if((CPUCapFlags&CPU_CAP_NEON))
80 return Mix_<NEONTag>;
81 #endif
82 #ifdef HAVE_SSE
83 if((CPUCapFlags&CPU_CAP_SSE))
84 return Mix_<SSETag>;
85 #endif
86 return Mix_<CTag>;
89 inline MixerOneFunc SelectMixerOne()
91 #ifdef HAVE_NEON
92 if((CPUCapFlags&CPU_CAP_NEON))
93 return Mix_<NEONTag>;
94 #endif
95 #ifdef HAVE_SSE
96 if((CPUCapFlags&CPU_CAP_SSE))
97 return Mix_<SSETag>;
98 #endif
99 return Mix_<CTag>;
102 inline HrtfMixerFunc SelectHrtfMixer()
104 #ifdef HAVE_NEON
105 if((CPUCapFlags&CPU_CAP_NEON))
106 return MixHrtf_<NEONTag>;
107 #endif
108 #ifdef HAVE_SSE
109 if((CPUCapFlags&CPU_CAP_SSE))
110 return MixHrtf_<SSETag>;
111 #endif
112 return MixHrtf_<CTag>;
115 inline HrtfMixerBlendFunc SelectHrtfBlendMixer()
117 #ifdef HAVE_NEON
118 if((CPUCapFlags&CPU_CAP_NEON))
119 return MixHrtfBlend_<NEONTag>;
120 #endif
121 #ifdef HAVE_SSE
122 if((CPUCapFlags&CPU_CAP_SSE))
123 return MixHrtfBlend_<SSETag>;
124 #endif
125 return MixHrtfBlend_<CTag>;
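/* The selectors above are a simple runtime CPU dispatch: each mixer routine
 * is instantiated per instruction-set tag (CTag for plain C, SSETag, NEONTag),
 * and the best variant reported by CPUCapFlags is cached as a function
 * pointer at init time. The C fallback is always available, so a selector
 * never returns null.
 */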
128 } // namespace
130 void Voice::InitMixer(std::optional<std::string> resopt)
132 if(resopt)
134 struct ResamplerEntry {
135 const std::string_view name;
136 const Resampler resampler;
138 constexpr std::array ResamplerList{
139 ResamplerEntry{"none"sv, Resampler::Point},
140 ResamplerEntry{"point"sv, Resampler::Point},
141 ResamplerEntry{"linear"sv, Resampler::Linear},
142 ResamplerEntry{"spline"sv, Resampler::Spline},
143 ResamplerEntry{"gaussian"sv, Resampler::Gaussian},
144 ResamplerEntry{"bsinc12"sv, Resampler::BSinc12},
145 ResamplerEntry{"fast_bsinc12"sv, Resampler::FastBSinc12},
146 ResamplerEntry{"bsinc24"sv, Resampler::BSinc24},
147 ResamplerEntry{"fast_bsinc24"sv, Resampler::FastBSinc24},
150 std::string_view resampler{*resopt};
152 if(al::case_compare(resampler, "cubic"sv) == 0)
154 WARN("Resampler option \"%s\" is deprecated, using spline\n", resopt->c_str());
155 resampler = "spline"sv;
157 else if(al::case_compare(resampler, "sinc4"sv) == 0
158 || al::case_compare(resampler, "sinc8"sv) == 0)
160 WARN("Resampler option \"%s\" is deprecated, using gaussian\n", resopt->c_str());
161 resampler = "gaussian"sv;
163 else if(al::case_compare(resampler, "bsinc"sv) == 0)
165 WARN("Resampler option \"%s\" is deprecated, using bsinc12\n", resopt->c_str());
166 resampler = "bsinc12"sv;
169 auto iter = std::find_if(ResamplerList.begin(), ResamplerList.end(),
170 [resampler](const ResamplerEntry &entry) -> bool
171 { return al::case_compare(resampler, entry.name) == 0; });
172 if(iter == ResamplerList.end())
173 ERR("Invalid resampler: %s\n", resopt->c_str());
174 else
175 ResamplerDefault = iter->resampler;
178 MixSamplesOut = SelectMixer();
179 MixSamplesOne = SelectMixerOne();
180 MixHrtfBlendSamples = SelectHrtfBlendMixer();
181 MixHrtfSamples = SelectHrtfMixer();
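/* For example, a config "resampler" value of "cubic" now selects the spline
 * resampler, "sinc4"/"sinc8" map to gaussian, and "bsinc" maps to bsinc12;
 * an unrecognized name is logged and leaves ResamplerDefault unchanged. The
 * mixer function pointers are (re)selected here whether or not a resampler
 * option was given.
 */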
185 namespace {
187 /* IMA ADPCM Stepsize table */
188 constexpr std::array<int,89> IMAStep_size{{
189 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19,
190 21, 23, 25, 28, 31, 34, 37, 41, 45, 50, 55,
191 60, 66, 73, 80, 88, 97, 107, 118, 130, 143, 157,
192 173, 190, 209, 230, 253, 279, 307, 337, 371, 408, 449,
193 494, 544, 598, 658, 724, 796, 876, 963, 1060, 1166, 1282,
194 1411, 1552, 1707, 1878, 2066, 2272, 2499, 2749, 3024, 3327, 3660,
195 4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845, 8630, 9493,10442,
196 11487,12635,13899,15289,16818,18500,20350,22358,24633,27086,29794,
197 32767
200 /* IMA4 ADPCM Codeword decode table */
201 constexpr std::array<int,16> IMA4Codeword{{
202 1, 3, 5, 7, 9, 11, 13, 15,
203 -1,-3,-5,-7,-9,-11,-13,-15,
206 /* IMA4 ADPCM Step index adjust decode table */
207 constexpr std::array<int,16> IMA4Index_adjust{{
208 -1,-1,-1,-1, 2, 4, 6, 8,
209 -1,-1,-1,-1, 2, 4, 6, 8
212 /* MSADPCM Adaption table */
213 constexpr std::array<int,16> MSADPCMAdaption{{
214 230, 230, 230, 230, 307, 409, 512, 614,
215 768, 614, 512, 409, 307, 230, 230, 230
218 /* MSADPCM Adaption Coefficient tables */
219 constexpr std::array MSADPCMAdaptionCoeff{
220 std::array{256, 0},
221 std::array{512, -256},
222 std::array{ 0, 0},
223 std::array{192, 64},
224 std::array{240, 0},
225 std::array{460, -208},
226 std::array{392, -232}
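/* The adaption coefficients are 8.8 fixed-point predictor weights (divided by
 * 256 when applied). For example, block predictor 5 selects {460, -208},
 * i.e. roughly 1.797*history[0] - 0.8125*history[1].
 */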
230 void SendSourceStoppedEvent(ContextBase *context, uint id)
232 RingBuffer *ring{context->mAsyncEvents.get()};
233 auto evt_vec = ring->getWriteVector();
234 if(evt_vec.first.len < 1) return;
236 auto &evt = InitAsyncEvent<AsyncSourceStateEvent>(evt_vec.first.buf);
237 evt.mId = id;
238 evt.mState = AsyncSrcState::Stop;
240 ring->writeAdvance(1);
244 al::span<const float> DoFilters(BiquadFilter &lpfilter, BiquadFilter &hpfilter,
245 const al::span<float,BufferLineSize> dst, const al::span<const float> src, int type)
247 switch(type)
249 case AF_None:
250 lpfilter.clear();
251 hpfilter.clear();
252 break;
254 case AF_LowPass:
255 lpfilter.process(src, dst);
256 hpfilter.clear();
257 return dst.first(src.size());
258 case AF_HighPass:
259 lpfilter.clear();
260 hpfilter.process(src, dst);
261 return dst.first(src.size());
263 case AF_BandPass:
264 DualBiquad{lpfilter, hpfilter}.process(src, dst);
265 return dst.first(src.size());
267 return src;
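/* Note that AF_None returns the input span as-is (no copy), while the
 * filtered cases return the destination buffer trimmed to the input length.
 * AF_BandPass runs both biquads in a single pass via DualBiquad, so no
 * intermediate buffer is needed.
 */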
271 template<FmtType Type>
272 inline void LoadSamples(const al::span<float> dstSamples, const al::span<const std::byte> srcData,
273 const size_t srcChan, const size_t srcOffset, const size_t srcStep,
274 const size_t samplesPerBlock [[maybe_unused]]) noexcept
276 using TypeTraits = al::FmtTypeTraits<Type>;
277 using SampleType = typename TypeTraits::Type;
278 static constexpr size_t sampleSize{sizeof(SampleType)};
279 assert(srcChan < srcStep);
280 auto converter = TypeTraits{};
282 al::span<const SampleType> src{reinterpret_cast<const SampleType*>(srcData.data()),
283 srcData.size()/sampleSize};
284 auto ssrc = src.cbegin() + ptrdiff_t(srcOffset*srcStep);
285 std::generate(dstSamples.begin(), dstSamples.end(), [&ssrc,srcChan,srcStep,converter]
287 auto ret = converter(ssrc[srcChan]);
288 ssrc += ptrdiff_t(srcStep);
289 return ret;
293 template<>
294 inline void LoadSamples<FmtIMA4>(al::span<float> dstSamples, al::span<const std::byte> src,
295 const size_t srcChan, const size_t srcOffset, const size_t srcStep,
296 const size_t samplesPerBlock) noexcept
298 static constexpr int MaxStepIndex{static_cast<int>(IMAStep_size.size()) - 1};
300 assert(srcStep >= 1 && srcStep <= 2);
301 assert(srcChan < srcStep);
302 assert(samplesPerBlock > 1);
303 const size_t blockBytes{((samplesPerBlock-1)/2 + 4)*srcStep};
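/* Worked example: with the common 65-samples-per-block alignment, a mono
 * stream uses ((65-1)/2 + 4)*1 = 36 bytes per block (4 header bytes plus 32
 * bytes of nibbles); a stereo stream doubles that to 72.
 */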
305 /* Skip to the ADPCM block containing the srcOffset sample. */
306 src = src.subspan(srcOffset/samplesPerBlock*blockBytes);
307 /* Calculate how many samples need to be skipped in the block. */
308 size_t skip{srcOffset % samplesPerBlock};
310 /* NOTE: This could probably be optimized better. */
311 auto dst = dstSamples.begin();
312 while(dst != dstSamples.end())
314 /* Each IMA4 block starts with a signed 16-bit sample, and a signed
315 * 16-bit table index. The table index needs to be clamped.
317 int sample{int(src[srcChan*4 + 0]) | (int(src[srcChan*4 + 1]) << 8)};
318 int index{int(src[srcChan*4 + 2]) | (int(src[srcChan*4 + 3]) << 8)};
319 auto nibbleData = src.subspan((srcStep+srcChan)*4);
320 src = src.subspan(blockBytes);
322 sample = (sample^0x8000) - 32768;
323 index = std::clamp((index^0x8000) - 32768, 0, MaxStepIndex);
325 if(skip == 0)
327 *dst = static_cast<float>(sample) / 32768.0f;
328 if(++dst == dstSamples.end()) return;
330 else
331 --skip;
333 auto decode_sample = [&sample,&index](const uint8_t nibble)
335 sample += IMA4Codeword[nibble] * IMAStep_size[static_cast<uint>(index)] / 8;
336 sample = std::clamp(sample, -32768, 32767);
338 index += IMA4Index_adjust[nibble];
339 index = std::clamp(index, 0, MaxStepIndex);
341 return sample;
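/* Each nibble nudges the predictor by a fraction of the current step size and
 * then adapts the step. For example, nibble 0x7 adds 15*step/8 (just under
 * two steps) and raises the step index by 8, while nibble 0x1 adds 3*step/8
 * and lowers the index by 1.
 */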
344 /* The rest of the block is arranged as a series of nibbles, contained
345 * in 4 *bytes* per channel interleaved. So every 8 nibbles we need to
346 * skip 4 bytes per channel to get the next nibbles for this channel.
348 * First, decode the samples that we need to skip in the block (will
349 * always be less than the block size). They must still be decoded, even
350 * though discarded, to keep proper decoder state for the remaining samples.
352 static constexpr auto NibbleMask = std::byte{0xf};
353 size_t nibbleOffset{0};
354 const size_t startOffset{skip + 1};
355 for(;skip;--skip)
357 const size_t byteShift{(nibbleOffset&1) * 4};
358 const size_t wordOffset{(nibbleOffset>>1) & ~3_uz};
359 const size_t byteOffset{wordOffset*srcStep + ((nibbleOffset>>1)&3u)};
360 ++nibbleOffset;
362 const auto nval = (nibbleData[byteOffset]>>byteShift) & NibbleMask;
363 std::ignore = decode_sample(al::to_underlying(nval));
366 /* Second, decode the rest of the block and write to the output, until
367 * the end of the block or the end of output.
369 const size_t todo{std::min(samplesPerBlock-startOffset, size_t(dstSamples.end()-dst))};
370 dst = std::generate_n(dst, todo, [&]
372 const size_t byteShift{(nibbleOffset&1) * 4};
373 const size_t wordOffset{(nibbleOffset>>1) & ~3_uz};
374 const size_t byteOffset{wordOffset*srcStep + ((nibbleOffset>>1)&3u)};
375 ++nibbleOffset;
377 const auto nval = (nibbleData[byteOffset]>>byteShift) & NibbleMask;
378 return static_cast<float>(decode_sample(al::to_underlying(nval))) / 32768.0f;
383 template<>
384 inline void LoadSamples<FmtMSADPCM>(al::span<float> dstSamples, al::span<const std::byte> src,
385 const size_t srcChan, const size_t srcOffset, const size_t srcStep,
386 const size_t samplesPerBlock) noexcept
388 assert(srcStep >= 1 && srcStep <= 2);
389 assert(srcChan < srcStep);
390 assert(samplesPerBlock > 2);
391 const size_t blockBytes{((samplesPerBlock-2)/2 + 7)*srcStep};
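/* Worked example: a mono stream with 500 samples per block takes
 * ((500-2)/2 + 7)*1 = 256 bytes per block; 7 header bytes (predictor, delta,
 * and two history samples) plus 249 bytes of nibbles.
 */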
393 src = src.subspan(srcOffset/samplesPerBlock*blockBytes);
394 size_t skip{srcOffset % samplesPerBlock};
396 auto dst = dstSamples.begin();
397 while(dst != dstSamples.end())
399 /* Each MS ADPCM block starts with an 8-bit block predictor, used to
400 * dictate how the two sample history values are mixed with the decoded
401 * sample, and an initial signed 16-bit delta value which scales the
402 * nibble sample value. This is followed by the two initial 16-bit
403 * sample history values.
405 const uint8_t blockpred{std::min(uint8_t(src[srcChan]), uint8_t{6})};
406 int delta{int(src[srcStep + 2*srcChan + 0]) | (int(src[srcStep + 2*srcChan + 1]) << 8)};
408 auto sampleHistory = std::array{
409 int(src[3*srcStep + 2*srcChan + 0]) | (int(src[3*srcStep + 2*srcChan + 1])<<8),
410 int(src[5*srcStep + 2*srcChan + 0]) | (int(src[5*srcStep + 2*srcChan + 1])<<8)};
411 const auto input = src.subspan(7*srcStep);
412 src = src.subspan(blockBytes);
414 const auto coeffs = al::span{MSADPCMAdaptionCoeff[blockpred]};
415 delta = (delta^0x8000) - 32768;
416 sampleHistory[0] = (sampleHistory[0]^0x8000) - 32768;
417 sampleHistory[1] = (sampleHistory[1]^0x8000) - 32768;
419 /* The second history sample is "older", so it's the first to be
420 * written out.
422 if(skip == 0)
424 *dst = static_cast<float>(sampleHistory[1]) / 32768.0f;
425 if(++dst == dstSamples.end()) return;
426 *dst = static_cast<float>(sampleHistory[0]) / 32768.0f;
427 if(++dst == dstSamples.end()) return;
429 else if(skip == 1)
431 --skip;
432 *dst = static_cast<float>(sampleHistory[0]) / 32768.0f;
433 if(++dst == dstSamples.end()) return;
435 else
436 skip -= 2;
438 auto decode_sample = [&sampleHistory,&delta,coeffs](const uint8_t nibble)
440 int pred{(sampleHistory[0]*coeffs[0] + sampleHistory[1]*coeffs[1]) / 256};
441 pred += ((nibble^0x08) - 0x08) * delta;
442 pred = std::clamp(pred, -32768, 32767);
444 sampleHistory[1] = sampleHistory[0];
445 sampleHistory[0] = pred;
447 delta = (MSADPCMAdaption[nibble] * delta) / 256;
448 delta = std::max(16, delta);
450 return pred;
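/* The delta (nibble scale) adapts per sample: small-magnitude nibbles shrink
 * it (230/256, about 0.9x) while large ones grow it (e.g. MSADPCMAdaption[7]
 * is 614, about 2.4x), with a floor of 16 so it can't collapse to zero.
 */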
453 /* The rest of the block is a series of nibbles, interleaved per-
454 * channel. First, skip samples.
456 static constexpr auto NibbleMask = std::byte{0xf};
457 const size_t startOffset{skip + 2};
458 size_t nibbleOffset{srcChan};
459 for(;skip;--skip)
461 const size_t byteOffset{nibbleOffset>>1};
462 const size_t byteShift{((nibbleOffset&1)^1) * 4};
463 nibbleOffset += srcStep;
465 const auto nval = (input[byteOffset]>>byteShift) & NibbleMask;
466 std::ignore = decode_sample(al::to_underlying(nval));
469 /* Now decode the rest of the block, until the end of the block or the
470 * dst buffer is filled.
472 const size_t todo{std::min(samplesPerBlock-startOffset, size_t(dstSamples.end()-dst))};
473 dst = std::generate_n(dst, todo, [&]
475 const size_t byteOffset{nibbleOffset>>1};
476 const size_t byteShift{((nibbleOffset&1)^1) * 4};
477 nibbleOffset += srcStep;
479 const auto nval = (input[byteOffset]>>byteShift) & NibbleMask;
480 return static_cast<float>(decode_sample(al::to_underlying(nval))) / 32768.0f;
485 void LoadSamples(const al::span<float> dstSamples, const al::span<const std::byte> src,
486 const size_t srcChan, const size_t srcOffset, const FmtType srcType, const size_t srcStep,
487 const size_t samplesPerBlock) noexcept
489 #define HANDLE_FMT(T) case T: \
490 LoadSamples<T>(dstSamples, src, srcChan, srcOffset, srcStep, \
491 samplesPerBlock); \
492 break
494 switch(srcType)
496 HANDLE_FMT(FmtUByte);
497 HANDLE_FMT(FmtShort);
498 HANDLE_FMT(FmtInt);
499 HANDLE_FMT(FmtFloat);
500 HANDLE_FMT(FmtDouble);
501 HANDLE_FMT(FmtMulaw);
502 HANDLE_FMT(FmtAlaw);
503 HANDLE_FMT(FmtIMA4);
504 HANDLE_FMT(FmtMSADPCM);
506 #undef HANDLE_FMT
509 void LoadBufferStatic(VoiceBufferItem *buffer, VoiceBufferItem *bufferLoopItem,
510 const size_t dataPosInt, const FmtType sampleType, const size_t srcChannel,
511 const size_t srcStep, al::span<float> voiceSamples)
513 if(!bufferLoopItem)
515 float lastSample{0.0f};
516 /* Load what's left to play from the buffer */
517 if(buffer->mSampleLen > dataPosInt) LIKELY
519 const size_t buffer_remaining{buffer->mSampleLen - dataPosInt};
520 const size_t remaining{std::min(voiceSamples.size(), buffer_remaining)};
521 LoadSamples(voiceSamples.first(remaining), buffer->mSamples, srcChannel, dataPosInt,
522 sampleType, srcStep, buffer->mBlockAlign);
523 lastSample = voiceSamples[remaining-1];
524 voiceSamples = voiceSamples.subspan(remaining);
527 if(const size_t toFill{voiceSamples.size()})
528 std::fill_n(voiceSamples.begin(), toFill, lastSample);
530 else
532 const size_t loopStart{buffer->mLoopStart};
533 const size_t loopEnd{buffer->mLoopEnd};
534 ASSUME(loopEnd > loopStart);
536 const size_t intPos{(dataPosInt < loopEnd) ? dataPosInt
537 : (((dataPosInt-loopStart)%(loopEnd-loopStart)) + loopStart)};
539 /* Load what's left of this loop iteration */
540 const size_t remaining{std::min(voiceSamples.size(), loopEnd-dataPosInt)};
541 LoadSamples(voiceSamples.first(remaining), buffer->mSamples, srcChannel, intPos,
542 sampleType, srcStep, buffer->mBlockAlign);
543 voiceSamples = voiceSamples.subspan(remaining);
545 /* Load repeats of the loop to fill the buffer. */
546 const size_t loopSize{loopEnd - loopStart};
547 while(const size_t toFill{std::min(voiceSamples.size(), loopSize)})
549 LoadSamples(voiceSamples.first(toFill), buffer->mSamples, srcChannel, loopStart,
550 sampleType, srcStep, buffer->mBlockAlign);
551 voiceSamples = voiceSamples.subspan(toFill);
556 void LoadBufferCallback(VoiceBufferItem *buffer, const size_t dataPosInt,
557 const size_t numCallbackSamples, const FmtType sampleType, const size_t srcChannel,
558 const size_t srcStep, al::span<float> voiceSamples)
560 float lastSample{0.0f};
561 if(numCallbackSamples > dataPosInt) LIKELY
563 const size_t remaining{std::min(voiceSamples.size(), numCallbackSamples-dataPosInt)};
564 LoadSamples(voiceSamples.first(remaining), buffer->mSamples, srcChannel, dataPosInt,
565 sampleType, srcStep, buffer->mBlockAlign);
566 lastSample = voiceSamples[remaining-1];
567 voiceSamples = voiceSamples.subspan(remaining);
570 if(const size_t toFill{voiceSamples.size()})
571 std::fill_n(voiceSamples.begin(), toFill, lastSample);
574 void LoadBufferQueue(VoiceBufferItem *buffer, VoiceBufferItem *bufferLoopItem,
575 size_t dataPosInt, const FmtType sampleType, const size_t srcChannel,
576 const size_t srcStep, al::span<float> voiceSamples)
578 float lastSample{0.0f};
579 /* Crawl the buffer queue to fill in the temp buffer */
580 while(buffer && !voiceSamples.empty())
582 if(dataPosInt >= buffer->mSampleLen)
584 dataPosInt -= buffer->mSampleLen;
585 buffer = buffer->mNext.load(std::memory_order_acquire);
586 if(!buffer) buffer = bufferLoopItem;
587 continue;
590 const size_t remaining{std::min(voiceSamples.size(), buffer->mSampleLen-dataPosInt)};
591 LoadSamples(voiceSamples.first(remaining), buffer->mSamples, srcChannel, dataPosInt,
592 sampleType, srcStep, buffer->mBlockAlign);
594 lastSample = voiceSamples[remaining-1];
595 voiceSamples = voiceSamples.subspan(remaining);
596 if(voiceSamples.empty())
597 break;
599 dataPosInt = 0;
600 buffer = buffer->mNext.load(std::memory_order_acquire);
601 if(!buffer) buffer = bufferLoopItem;
603 if(const size_t toFill{voiceSamples.size()})
604 std::fill_n(voiceSamples.begin(), toFill, lastSample);
608 void DoHrtfMix(const al::span<const float> samples, DirectParams &parms, const float TargetGain,
609 const size_t Counter, size_t OutPos, const bool IsPlaying, DeviceBase *Device)
611 const uint IrSize{Device->mIrSize};
612 const auto HrtfSamples = al::span{Device->ExtraSampleData};
613 const auto AccumSamples = al::span{Device->HrtfAccumData};
615 /* Copy the HRTF history and new input samples into a temp buffer. */
616 auto src_iter = std::copy(parms.Hrtf.History.begin(), parms.Hrtf.History.end(),
617 HrtfSamples.begin());
618 std::copy_n(samples.begin(), samples.size(), src_iter);
619 /* Copy the last used samples back into the history buffer for later. */
620 if(IsPlaying) LIKELY
622 const auto endsamples = HrtfSamples.subspan(samples.size(), parms.Hrtf.History.size());
623 std::copy_n(endsamples.cbegin(), endsamples.size(), parms.Hrtf.History.begin());
626 /* If fading and this is the first mixing pass, fade between the IRs. */
627 size_t fademix{0};
628 if(Counter && OutPos == 0)
630 fademix = std::min(samples.size(), Counter);
632 float gain{TargetGain};
634 /* The new coefficients need to fade in completely since they're
635 * replacing the old ones. To keep the gain fading consistent,
636 * interpolate between the old and new target gains given how much of
637 * the fade time this mix handles.
639 if(Counter > fademix)
641 const float a{static_cast<float>(fademix) / static_cast<float>(Counter)};
642 gain = lerpf(parms.Hrtf.Old.Gain, TargetGain, a);
645 MixHrtfFilter hrtfparams{
646 parms.Hrtf.Target.Coeffs,
647 parms.Hrtf.Target.Delay,
648 0.0f, gain / static_cast<float>(fademix)};
649 MixHrtfBlendSamples(HrtfSamples, AccumSamples.subspan(OutPos), IrSize, &parms.Hrtf.Old,
650 &hrtfparams, fademix);
652 /* Update the old parameters with the result. */
653 parms.Hrtf.Old = parms.Hrtf.Target;
654 parms.Hrtf.Old.Gain = gain;
655 OutPos += fademix;
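/* Example of the fade bookkeeping above: with Counter = 64 and fademix = 32,
 * a = 0.5, so this blend pass lands halfway between the old and new target
 * gains; Old.Gain is then set to that midpoint so the remaining samples
 * continue the ramp from where the blend left off.
 */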
658 if(fademix < samples.size())
660 const size_t todo{samples.size() - fademix};
661 float gain{TargetGain};
663 /* Interpolate the target gain if the gain fading lasts longer than
664 * this mix.
666 if(Counter > samples.size())
668 const float a{static_cast<float>(todo) / static_cast<float>(Counter-fademix)};
669 gain = lerpf(parms.Hrtf.Old.Gain, TargetGain, a);
672 MixHrtfFilter hrtfparams{
673 parms.Hrtf.Target.Coeffs,
674 parms.Hrtf.Target.Delay,
675 parms.Hrtf.Old.Gain,
676 (gain - parms.Hrtf.Old.Gain) / static_cast<float>(todo)};
677 MixHrtfSamples(HrtfSamples.subspan(fademix), AccumSamples.subspan(OutPos), IrSize,
678 &hrtfparams, todo);
680 /* Store the now-current gain for next time. */
681 parms.Hrtf.Old.Gain = gain;
685 void DoNfcMix(const al::span<const float> samples, al::span<FloatBufferLine> OutBuffer,
686 DirectParams &parms, const al::span<const float,MaxOutputChannels> OutGains,
687 const uint Counter, const uint OutPos, DeviceBase *Device)
689 using FilterProc = void (NfcFilter::*)(const al::span<const float>, const al::span<float>);
690 static constexpr std::array<FilterProc,MaxAmbiOrder+1> NfcProcess{{
691 nullptr, &NfcFilter::process1, &NfcFilter::process2, &NfcFilter::process3}};
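/* Channel 0 (W) is mixed below without near-field filtering; each higher
 * ambisonic order then gets its own NFC filter stage applied over that
 * order's channels. For a full 3D third-order device, NumChannelsPerOrder
 * would be {1, 3, 5, 7}; lower-order devices leave the trailing counts at 0,
 * which ends the loop early.
 */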
693 MixSamples(samples, al::span{OutBuffer[0]}.subspan(OutPos), parms.Gains.Current[0],
694 OutGains[0], Counter);
695 OutBuffer = OutBuffer.subspan(1);
696 auto CurrentGains = al::span{parms.Gains.Current}.subspan(1);
697 auto TargetGains = OutGains.subspan(1);
699 const auto nfcsamples = al::span{Device->ExtraSampleData}.first(samples.size());
700 size_t order{1};
701 while(const size_t chancount{Device->NumChannelsPerOrder[order]})
703 (parms.NFCtrlFilter.*NfcProcess[order])(samples, nfcsamples);
704 MixSamples(nfcsamples, OutBuffer.first(chancount), CurrentGains, TargetGains, Counter,
705 OutPos);
706 if(++order == MaxAmbiOrder+1)
707 break;
708 OutBuffer = OutBuffer.subspan(chancount);
709 CurrentGains = CurrentGains.subspan(chancount);
710 TargetGains = TargetGains.subspan(chancount);
714 } // namespace
716 void Voice::mix(const State vstate, ContextBase *Context, const nanoseconds deviceTime,
717 const uint SamplesToDo)
719 static constexpr std::array<float,MaxOutputChannels> SilentTarget{};
721 ASSUME(SamplesToDo > 0);
723 DeviceBase *Device{Context->mDevice};
724 const uint NumSends{Device->NumAuxSends};
726 /* Get voice info */
727 int DataPosInt{mPosition.load(std::memory_order_relaxed)};
728 uint DataPosFrac{mPositionFrac.load(std::memory_order_relaxed)};
729 VoiceBufferItem *BufferListItem{mCurrentBuffer.load(std::memory_order_relaxed)};
730 VoiceBufferItem *BufferLoopItem{mLoopBuffer.load(std::memory_order_relaxed)};
731 const uint increment{mStep};
732 if(increment < 1) UNLIKELY
734 /* If the voice is supposed to be stopping but can't be mixed, just
735 * stop it before bailing.
737 if(vstate == Stopping)
738 mPlayState.store(Stopped, std::memory_order_release);
739 return;
742 /* If the static voice's current position is beyond the buffer loop end
743 * position, disable looping.
745 if(mFlags.test(VoiceIsStatic) && BufferLoopItem)
747 if(DataPosInt >= 0 && static_cast<uint>(DataPosInt) >= BufferListItem->mLoopEnd)
748 BufferLoopItem = nullptr;
751 uint OutPos{0u};
753 /* Check if we're doing a delayed start, and whether it begins in this update. */
754 if(mStartTime > deviceTime) UNLIKELY
756 /* If the voice is supposed to be stopping but hasn't actually started
757 * yet, make sure it's stopped.
759 if(vstate == Stopping)
761 mPlayState.store(Stopped, std::memory_order_release);
762 return;
765 /* If the start time is too far ahead, don't bother. */
766 auto diff = mStartTime - deviceTime;
767 if(diff >= seconds{1})
768 return;
770 /* Get the number of samples ahead of the current time that output
771 * should start at. Skip this update if it's beyond the output sample
772 * count.
774 OutPos = static_cast<uint>(round<seconds>(diff * Device->Frequency).count());
775 if(OutPos >= SamplesToDo) return;
778 /* Calculate the number of samples to mix, and the number of (resampled)
779 * samples that need to be loaded (mixing samples and decoder padding).
781 const uint samplesToMix{SamplesToDo - OutPos};
782 const uint samplesToLoad{samplesToMix + mDecoderPadding};
784 /* Get a span of pointers to hold the floating point, deinterleaved,
785 * resampled buffer data to be mixed.
787 auto SamplePointers = std::array<float*,DeviceBase::MixerChannelsMax>{};
788 const auto MixingSamples = al::span{SamplePointers}.first(mChans.size());
790 const uint channelStep{(samplesToLoad+3u)&~3u};
791 auto base = Device->mSampleData.end() - MixingSamples.size()*channelStep;
792 std::generate(MixingSamples.begin(), MixingSamples.end(), [&base,channelStep]
794 const auto ret = base;
795 base += channelStep;
796 return al::to_address(ret);
800 /* UHJ2 and SuperStereo only have 2 buffer channels, but 3 mixing channels
801 * (3rd channel is generated from decoding). MonoDup only has 1 buffer
802 * channel, but 2 mixing channels (2nd channel is just duplicated).
804 const size_t realChannels{(mFmtChannels == FmtMonoDup) ? 1u
805 : (mFmtChannels == FmtUHJ2 || mFmtChannels == FmtSuperStereo) ? 2u
806 : MixingSamples.size()};
807 for(size_t chan{0};chan < realChannels;++chan)
809 static constexpr uint ResBufSize{std::tuple_size_v<decltype(DeviceBase::mResampleData)>};
810 static constexpr uint srcSizeMax{ResBufSize - MaxResamplerEdge};
812 const al::span prevSamples{mPrevSamples[chan]};
813 std::copy(prevSamples.cbegin(), prevSamples.cend(), Device->mResampleData.begin());
814 const auto resampleBuffer = al::span{Device->mResampleData}.subspan<MaxResamplerEdge>();
815 int intPos{DataPosInt};
816 uint fracPos{DataPosFrac};
818 /* Load samples for this channel from the available buffer(s), with
819 * resampling.
821 for(uint samplesLoaded{0};samplesLoaded < samplesToLoad;)
823 /* Calculate the number of dst samples that can be loaded this
824 * iteration, given the available resampler buffer size, and the
825 * number of src samples that are needed to load it.
827 auto calc_buffer_sizes = [fracPos,increment](uint dstBufferSize)
829 /* If ext=true, calculate the last written dst pos from the dst
830 * count, convert to the last read src pos, then add one to get
831 * the src count.
833 * If ext=false, convert the dst count to src count directly.
835 * Without this, the src count could be short by one when
836 * increment < 1.0, or not have a full src at the end when
837 * increment > 1.0.
839 const bool ext{increment <= MixerFracOne};
840 uint64_t dataSize64{dstBufferSize - ext};
841 dataSize64 = (dataSize64*increment + fracPos) >> MixerFracBits;
842 /* Also include resampler padding. */
843 dataSize64 += ext + MaxResamplerEdge;
845 if(dataSize64 <= srcSizeMax)
846 return std::make_pair(dstBufferSize, static_cast<uint>(dataSize64));
848 /* If the source size got saturated, we can't fill the desired
849 * dst size. Figure out how many dst samples we can fill.
851 dataSize64 = srcSizeMax - MaxResamplerEdge;
852 dataSize64 = ((dataSize64<<MixerFracBits) - fracPos) / increment;
853 if(dataSize64 < dstBufferSize)
855 /* Some resamplers require the destination being 16-byte
856 * aligned, so limit to a multiple of 4 samples to maintain
857 * alignment if we need to do another iteration after this.
859 dstBufferSize = static_cast<uint>(dataSize64) & ~3u;
861 return std::make_pair(dstBufferSize, srcSizeMax);
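/* Rough example of the dst->src conversion above: at a pitch of 2.0
 * (increment = 2*MixerFracOne) with no fractional offset, filling 256 dst
 * samples needs 256*2 = 512 src samples plus MaxResamplerEdge padding. If
 * that would exceed the resample buffer, the dst count is instead derived
 * from srcSizeMax and rounded down to a multiple of 4 for alignment.
 */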
863 const auto [dstBufferSize, srcBufferSize] = calc_buffer_sizes(
864 samplesToLoad - samplesLoaded);
866 size_t srcSampleDelay{0};
867 if(intPos < 0) UNLIKELY
869 /* If the current position is negative, there's that many
870 * silent samples to load before using the buffer.
872 srcSampleDelay = static_cast<uint>(-intPos);
873 if(srcSampleDelay >= srcBufferSize)
875 /* If the number of silent source samples exceeds the
876 * number to load, the output will be silent.
878 std::fill_n(MixingSamples[chan]+samplesLoaded, dstBufferSize, 0.0f);
879 std::fill_n(resampleBuffer.begin(), srcBufferSize, 0.0f);
880 goto skip_resample;
883 std::fill_n(resampleBuffer.begin(), srcSampleDelay, 0.0f);
886 /* Load the necessary samples from the given buffer(s). */
887 if(!BufferListItem) UNLIKELY
889 const uint avail{std::min(srcBufferSize, MaxResamplerEdge)};
890 const uint tofill{std::max(srcBufferSize, MaxResamplerEdge)};
891 const auto srcbuf = resampleBuffer.first(tofill);
893 /* When loading from a voice that ended prematurely, only take
894 * the samples that get closest to 0 amplitude. This helps
895 * certain sounds fade out better.
897 auto srciter = std::min_element(srcbuf.begin(), srcbuf.begin()+ptrdiff_t(avail),
898 [](const float l, const float r) { return std::abs(l) < std::abs(r); });
900 std::fill(srciter+1, srcbuf.end(), *srciter);
902 else if(mFlags.test(VoiceIsStatic))
904 const auto uintPos = static_cast<uint>(std::max(intPos, 0));
905 const auto bufferSamples = resampleBuffer.subspan(srcSampleDelay,
906 srcBufferSize-srcSampleDelay);
907 LoadBufferStatic(BufferListItem, BufferLoopItem, uintPos, mFmtType, chan,
908 mFrameStep, bufferSamples);
910 else if(mFlags.test(VoiceIsCallback))
912 const auto uintPos = static_cast<uint>(std::max(intPos, 0));
913 const uint callbackBase{mCallbackBlockBase * mSamplesPerBlock};
914 const size_t bufferOffset{uintPos - callbackBase};
915 const size_t needSamples{bufferOffset + srcBufferSize - srcSampleDelay};
916 const size_t needBlocks{(needSamples + mSamplesPerBlock-1) / mSamplesPerBlock};
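/* Example of the block accounting: with 1024 samples per block and
 * needSamples reaching 2500 samples past the block base,
 * needBlocks = (2500+1023)/1024 = 3. Only the blocks not already decoded are
 * requested from the callback below; a short or negative return marks the
 * voice as callback-stopped so no further requests are made.
 */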
917 if(!mFlags.test(VoiceCallbackStopped) && needBlocks > mNumCallbackBlocks)
919 const size_t byteOffset{mNumCallbackBlocks*size_t{mBytesPerBlock}};
920 const size_t needBytes{(needBlocks-mNumCallbackBlocks)*size_t{mBytesPerBlock}};
922 const int gotBytes{BufferListItem->mCallback(BufferListItem->mUserData,
923 &BufferListItem->mSamples[byteOffset], static_cast<int>(needBytes))};
924 if(gotBytes < 0)
925 mFlags.set(VoiceCallbackStopped);
926 else if(static_cast<uint>(gotBytes) < needBytes)
928 mFlags.set(VoiceCallbackStopped);
929 mNumCallbackBlocks += static_cast<uint>(gotBytes) / mBytesPerBlock;
931 else
932 mNumCallbackBlocks = static_cast<uint>(needBlocks);
934 const size_t numSamples{size_t{mNumCallbackBlocks} * mSamplesPerBlock};
935 const auto bufferSamples = resampleBuffer.subspan(srcSampleDelay,
936 srcBufferSize-srcSampleDelay);
937 LoadBufferCallback(BufferListItem, bufferOffset, numSamples, mFmtType, chan,
938 mFrameStep, bufferSamples);
940 else
942 const auto uintPos = static_cast<uint>(std::max(intPos, 0));
943 const auto bufferSamples = resampleBuffer.subspan(srcSampleDelay,
944 srcBufferSize-srcSampleDelay);
945 LoadBufferQueue(BufferListItem, BufferLoopItem, uintPos, mFmtType, chan,
946 mFrameStep, bufferSamples);
949 /* If there's a matching sample step and no phase offset, use a
950 * simple copy for resampling.
952 if(increment == MixerFracOne && fracPos == 0)
953 std::copy_n(resampleBuffer.cbegin(), dstBufferSize,
954 MixingSamples[chan]+samplesLoaded);
955 else
956 mResampler(&mResampleState, Device->mResampleData, fracPos, increment,
957 {MixingSamples[chan]+samplesLoaded, dstBufferSize});
959 /* Store the last source samples used for next time. */
960 if(vstate == Playing) LIKELY
962 /* Only store samples for the end of the mix, excluding what
963 * gets loaded for decoder padding.
965 const uint loadEnd{samplesLoaded + dstBufferSize};
966 if(samplesToMix > samplesLoaded && samplesToMix <= loadEnd) LIKELY
968 const size_t dstOffset{samplesToMix - samplesLoaded};
969 const size_t srcOffset{(dstOffset*increment + fracPos) >> MixerFracBits};
970 std::copy_n(Device->mResampleData.cbegin()+srcOffset, prevSamples.size(),
971 prevSamples.begin());
975 skip_resample:
976 samplesLoaded += dstBufferSize;
977 if(samplesLoaded < samplesToLoad)
979 fracPos += dstBufferSize*increment;
980 const uint srcOffset{fracPos >> MixerFracBits};
981 fracPos &= MixerFracMask;
982 intPos += static_cast<int>(srcOffset);
984 /* If more samples need to be loaded, copy the back of the
985 * resampleBuffer to the front to reuse it. prevSamples isn't
986 * reliable since it's only updated for the end of the mix.
988 std::copy_n(Device->mResampleData.cbegin()+srcOffset, MaxResamplerPadding,
989 Device->mResampleData.begin());
993 if(mFmtChannels == FmtMonoDup)
995 /* NOTE: a mono source shouldn't have a decoder or the VoiceIsAmbisonic
996 * flag, so aliasing instead of copying to the second channel shouldn't
997 * be a problem.
999 MixingSamples[1] = MixingSamples[0];
1001 else for(auto &samples : MixingSamples.subspan(realChannels))
1002 std::fill_n(samples, samplesToLoad, 0.0f);
1004 if(mDecoder)
1005 mDecoder->decode(MixingSamples, samplesToMix, (vstate==Playing));
1007 if(mFlags.test(VoiceIsAmbisonic))
1009 auto voiceSamples = MixingSamples.begin();
1010 for(auto &chandata : mChans)
1012 chandata.mAmbiSplitter.processScale({*voiceSamples, samplesToMix},
1013 chandata.mAmbiHFScale, chandata.mAmbiLFScale);
1014 ++voiceSamples;
1018 const uint Counter{mFlags.test(VoiceIsFading) ? std::min(samplesToMix, 64u) : 0u};
1019 if(!Counter)
1021 /* No fading, just overwrite the old/current params. */
1022 for(auto &chandata : mChans)
1025 DirectParams &parms = chandata.mDryParams;
1026 if(!mFlags.test(VoiceHasHrtf))
1027 parms.Gains.Current = parms.Gains.Target;
1028 else
1029 parms.Hrtf.Old = parms.Hrtf.Target;
1031 for(uint send{0};send < NumSends;++send)
1033 if(mSend[send].Buffer.empty())
1034 continue;
1036 SendParams &parms = chandata.mWetParams[send];
1037 parms.Gains.Current = parms.Gains.Target;
1042 auto voiceSamples = MixingSamples.begin();
1043 for(auto &chandata : mChans)
1045 /* Now filter and mix to the appropriate outputs. */
1046 const al::span<float,BufferLineSize> FilterBuf{Device->FilteredData};
1048 DirectParams &parms = chandata.mDryParams;
1049 const auto samples = DoFilters(parms.LowPass, parms.HighPass, FilterBuf,
1050 {*voiceSamples, samplesToMix}, mDirect.FilterType);
1052 if(mFlags.test(VoiceHasHrtf))
1054 const float TargetGain{parms.Hrtf.Target.Gain * float(vstate == Playing)};
1055 DoHrtfMix(samples, parms, TargetGain, Counter, OutPos, (vstate == Playing),
1056 Device);
1058 else
1060 const auto TargetGains = (vstate == Playing) ? al::span{parms.Gains.Target}
1061 : al::span{SilentTarget};
1062 if(mFlags.test(VoiceHasNfc))
1063 DoNfcMix(samples, mDirect.Buffer, parms, TargetGains, Counter, OutPos, Device);
1064 else
1065 MixSamples(samples, mDirect.Buffer, parms.Gains.Current, TargetGains, Counter,
1066 OutPos);
1070 for(uint send{0};send < NumSends;++send)
1072 if(mSend[send].Buffer.empty())
1073 continue;
1075 SendParams &parms = chandata.mWetParams[send];
1076 const auto samples = DoFilters(parms.LowPass, parms.HighPass, FilterBuf,
1077 {*voiceSamples, samplesToMix}, mSend[send].FilterType);
1079 const auto TargetGains = (vstate == Playing) ? al::span{parms.Gains.Target}
1080 : al::span{SilentTarget};
1081 MixSamples(samples, mSend[send].Buffer, parms.Gains.Current, TargetGains, Counter,
1082 OutPos);
1085 ++voiceSamples;
1088 mFlags.set(VoiceIsFading);
1090 /* Don't update positions and buffers if we were stopping. */
1091 if(vstate == Stopping) UNLIKELY
1093 mPlayState.store(Stopped, std::memory_order_release);
1094 return;
1097 /* Update voice positions and buffers as needed. */
1098 DataPosFrac += increment*samplesToMix;
1099 DataPosInt += static_cast<int>(DataPosFrac>>MixerFracBits);
1100 DataPosFrac &= MixerFracMask;
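/* The play position is fixed point. As an illustration, assuming 16
 * fractional bits: at a pitch of 1.5 (increment = 0x18000), mixing 100
 * samples adds 0x960000 to the fractional position, which carries 150 whole
 * samples into DataPosInt and leaves DataPosFrac at 0.
 */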
1102 uint buffers_done{0u};
1103 if(BufferListItem && DataPosInt > 0) LIKELY
1105 if(mFlags.test(VoiceIsStatic))
1107 if(BufferLoopItem)
1109 /* Handle looping static source */
1110 const uint LoopStart{BufferListItem->mLoopStart};
1111 const uint LoopEnd{BufferListItem->mLoopEnd};
1112 uint DataPosUInt{static_cast<uint>(DataPosInt)};
1113 if(DataPosUInt >= LoopEnd)
1115 assert(LoopEnd > LoopStart);
1116 DataPosUInt = ((DataPosUInt-LoopStart)%(LoopEnd-LoopStart)) + LoopStart;
1117 DataPosInt = static_cast<int>(DataPosUInt);
1120 else
1122 /* Handle non-looping static source */
1123 if(static_cast<uint>(DataPosInt) >= BufferListItem->mSampleLen)
1124 BufferListItem = nullptr;
1127 else if(mFlags.test(VoiceIsCallback))
1129 /* Handle callback buffer source */
1130 const uint currentBlock{static_cast<uint>(DataPosInt) / mSamplesPerBlock};
1131 const uint blocksDone{currentBlock - mCallbackBlockBase};
1132 if(blocksDone < mNumCallbackBlocks)
1134 const size_t byteOffset{blocksDone*size_t{mBytesPerBlock}};
1135 const size_t byteEnd{mNumCallbackBlocks*size_t{mBytesPerBlock}};
1136 const al::span data{BufferListItem->mSamples};
1137 std::copy(data.cbegin()+ptrdiff_t(byteOffset), data.cbegin()+ptrdiff_t(byteEnd),
1138 data.begin());
1139 mNumCallbackBlocks -= blocksDone;
1140 mCallbackBlockBase += blocksDone;
1142 else
1144 BufferListItem = nullptr;
1145 mNumCallbackBlocks = 0;
1146 mCallbackBlockBase += blocksDone;
1149 else
1151 /* Handle streaming source */
1152 do {
1153 if(BufferListItem->mSampleLen > static_cast<uint>(DataPosInt))
1154 break;
1156 DataPosInt -= static_cast<int>(BufferListItem->mSampleLen);
1158 ++buffers_done;
1159 BufferListItem = BufferListItem->mNext.load(std::memory_order_relaxed);
1160 if(!BufferListItem) BufferListItem = BufferLoopItem;
1161 } while(BufferListItem);
1165 /* Capture the source ID in case it gets reset for stopping. */
1166 const uint SourceID{mSourceID.load(std::memory_order_relaxed)};
1168 /* Update voice info */
1169 mPosition.store(DataPosInt, std::memory_order_relaxed);
1170 mPositionFrac.store(DataPosFrac, std::memory_order_relaxed);
1171 mCurrentBuffer.store(BufferListItem, std::memory_order_relaxed);
1172 if(!BufferListItem)
1174 mLoopBuffer.store(nullptr, std::memory_order_relaxed);
1175 mSourceID.store(0u, std::memory_order_relaxed);
1177 std::atomic_thread_fence(std::memory_order_release);
1179 /* Send any events now, after the position/buffer info was updated. */
1180 const auto enabledevt = Context->mEnabledEvts.load(std::memory_order_acquire);
1181 if(buffers_done > 0 && enabledevt.test(al::to_underlying(AsyncEnableBits::BufferCompleted)))
1183 RingBuffer *ring{Context->mAsyncEvents.get()};
1184 auto evt_vec = ring->getWriteVector();
1185 if(evt_vec.first.len > 0)
1187 auto &evt = InitAsyncEvent<AsyncBufferCompleteEvent>(evt_vec.first.buf);
1188 evt.mId = SourceID;
1189 evt.mCount = buffers_done;
1190 ring->writeAdvance(1);
1194 if(!BufferListItem)
1196 /* If the voice just ended, set it to Stopping so the next render
1197 * ensures any residual noise fades to 0 amplitude.
1199 mPlayState.store(Stopping, std::memory_order_release);
1200 if(enabledevt.test(al::to_underlying(AsyncEnableBits::SourceState)))
1201 SendSourceStoppedEvent(Context, SourceID);
1205 void Voice::prepare(DeviceBase *device)
1207 /* Even if storing really high order ambisonics, we only mix channels for
1208 * orders up to the device order. The rest are simply dropped.
1210 uint num_channels{(mFmtChannels == FmtMonoDup) ? 2
1211 : (mFmtChannels == FmtUHJ2 || mFmtChannels == FmtSuperStereo) ? 3
1212 : ChannelsFromFmt(mFmtChannels, std::min(mAmbiOrder, device->mAmbiOrder))};
1213 if(num_channels > device->MixerChannelsMax) UNLIKELY
1215 ERR("Unexpected channel count: %u (limit: %zu, %s : %d)\n", num_channels,
1216 device->MixerChannelsMax, NameFromFormat(mFmtChannels), mAmbiOrder);
1217 num_channels = device->MixerChannelsMax;
1219 if(mChans.capacity() > 2 && num_channels < mChans.capacity())
1221 decltype(mChans){}.swap(mChans);
1222 decltype(mPrevSamples){}.swap(mPrevSamples);
1224 mChans.reserve(std::max(2u, num_channels));
1225 mChans.resize(num_channels);
1226 mPrevSamples.reserve(std::max(2u, num_channels));
1227 mPrevSamples.resize(num_channels);
1229 mDecoder = nullptr;
1230 mDecoderPadding = 0;
1231 if(mFmtChannels == FmtSuperStereo)
1233 switch(UhjDecodeQuality)
1235 case UhjQualityType::IIR:
1236 mDecoder = std::make_unique<UhjStereoDecoderIIR>();
1237 mDecoderPadding = UhjStereoDecoderIIR::sInputPadding;
1238 break;
1239 case UhjQualityType::FIR256:
1240 mDecoder = std::make_unique<UhjStereoDecoder<UhjLength256>>();
1241 mDecoderPadding = UhjStereoDecoder<UhjLength256>::sInputPadding;
1242 break;
1243 case UhjQualityType::FIR512:
1244 mDecoder = std::make_unique<UhjStereoDecoder<UhjLength512>>();
1245 mDecoderPadding = UhjStereoDecoder<UhjLength512>::sInputPadding;
1246 break;
1249 else if(IsUHJ(mFmtChannels))
1251 switch(UhjDecodeQuality)
1253 case UhjQualityType::IIR:
1254 mDecoder = std::make_unique<UhjDecoderIIR>();
1255 mDecoderPadding = UhjDecoderIIR::sInputPadding;
1256 break;
1257 case UhjQualityType::FIR256:
1258 mDecoder = std::make_unique<UhjDecoder<UhjLength256>>();
1259 mDecoderPadding = UhjDecoder<UhjLength256>::sInputPadding;
1260 break;
1261 case UhjQualityType::FIR512:
1262 mDecoder = std::make_unique<UhjDecoder<UhjLength512>>();
1263 mDecoderPadding = UhjDecoder<UhjLength512>::sInputPadding;
1264 break;
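/* Whichever decoder is chosen, its sInputPadding is recorded as
 * mDecoderPadding, so the mixer loads that many extra source samples per
 * pass; this is the mDecoderPadding term added to samplesToLoad in
 * Voice::mix() above.
 */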
1268 /* Clear the stepping value explicitly so the mixer knows not to mix this
1269 * until the update gets applied.
1271 mStep = 0;
1273 /* Make sure the sample history is cleared. */
1274 std::fill(mPrevSamples.begin(), mPrevSamples.end(), HistoryLine{});
1276 if(mFmtChannels == FmtUHJ2 && !device->mUhjEncoder)
1278 /* 2-channel UHJ needs different shelf filters. However, we can't just
1279 * use different shelf filters after mixing it, given any old speaker
1280 * setup the user has. To make this work, we apply the expected shelf
1281 * filters for decoding UHJ2 to quad (only needs LF scaling), and act
1282 * as if those 4 quad channels are encoded right back into B-Format.
1284 * This isn't perfect, but without an entirely separate and limited
1285 * UHJ2 path, it's better than nothing.
1287 * Note this isn't needed with UHJ output (UHJ2->B-Format->UHJ2 is
1288 * identity, so don't mess with it).
1290 const BandSplitter splitter{device->mXOverFreq / static_cast<float>(device->Frequency)};
1291 for(auto &chandata : mChans)
1293 chandata.mAmbiHFScale = 1.0f;
1294 chandata.mAmbiLFScale = 1.0f;
1295 chandata.mAmbiSplitter = splitter;
1296 chandata.mDryParams = DirectParams{};
1297 chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
1298 std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
1300 mChans[0].mAmbiLFScale = DecoderBase::sWLFScale;
1301 mChans[1].mAmbiLFScale = DecoderBase::sXYLFScale;
1302 mChans[2].mAmbiLFScale = DecoderBase::sXYLFScale;
1303 mFlags.set(VoiceIsAmbisonic);
1305 /* Don't need to set the VoiceIsAmbisonic flag if the device is not higher
1306 * order than the voice. No HF scaling is necessary to mix it.
1308 else if(mAmbiOrder && device->mAmbiOrder > mAmbiOrder)
1310 auto OrdersSpan = Is2DAmbisonic(mFmtChannels)
1311 ? al::span<const uint8_t>{AmbiIndex::OrderFrom2DChannel}
1312 : al::span<const uint8_t>{AmbiIndex::OrderFromChannel};
1313 auto OrderFromChan = OrdersSpan.cbegin();
1314 const auto scales = AmbiScale::GetHFOrderScales(mAmbiOrder, device->mAmbiOrder,
1315 device->m2DMixing);
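/* GetHFOrderScales returns one high-frequency gain per ambisonic order, to
 * compensate a lower-order source being reproduced on a higher-order device.
 * Each channel below looks up its scale by its order (via OrderFromChannel or
 * OrderFrom2DChannel) while the low-frequency band stays at unity.
 */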
1317 const BandSplitter splitter{device->mXOverFreq / static_cast<float>(device->Frequency)};
1318 for(auto &chandata : mChans)
1320 chandata.mAmbiHFScale = scales[*(OrderFromChan++)];
1321 chandata.mAmbiLFScale = 1.0f;
1322 chandata.mAmbiSplitter = splitter;
1323 chandata.mDryParams = DirectParams{};
1324 chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
1325 std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
1327 mFlags.set(VoiceIsAmbisonic);
1329 else
1331 for(auto &chandata : mChans)
1333 chandata.mDryParams = DirectParams{};
1334 chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
1335 std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
1337 mFlags.reset(VoiceIsAmbisonic);