/* openal-soft: core/voice.cpp */

#include "config.h"
#include "config_simd.h"

#include "voice.h"

#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <new>
#include <optional>
#include <utility>
#include <vector>

#include "alnumeric.h"
#include "alspan.h"
#include "alstring.h"
#include "ambidefs.h"
#include "async_event.h"
#include "buffer_storage.h"
#include "context.h"
#include "cpu_caps.h"
#include "devformat.h"
#include "device.h"
#include "filters/biquad.h"
#include "filters/nfc.h"
#include "filters/splitter.h"
#include "fmt_traits.h"
#include "logging.h"
#include "mixer.h"
#include "mixer/defs.h"
#include "mixer/hrtfdefs.h"
#include "opthelpers.h"
#include "resampler_limits.h"
#include "ringbuffer.h"
#include "vector.h"
#include "voice_change.h"

struct CTag;
#if HAVE_SSE
struct SSETag;
#endif
#if HAVE_NEON
struct NEONTag;
#endif


static_assert(!(DeviceBase::MixerLineSize&3), "MixerLineSize must be a multiple of 4");
static_assert(!(MaxResamplerEdge&3), "MaxResamplerEdge is not a multiple of 4");

static_assert((BufferLineSize-1)/MaxPitch > 0, "MaxPitch is too large for BufferLineSize!");
static_assert((INT_MAX>>MixerFracBits)/MaxPitch > BufferLineSize,
    "MaxPitch and/or BufferLineSize are too large for MixerFracBits!");

namespace {

using uint = unsigned int;
using namespace std::chrono;
using namespace std::string_view_literals;

using HrtfMixerFunc = void(*)(const al::span<const float> InSamples,
    const al::span<float2> AccumSamples, const uint IrSize, const MixHrtfFilter *hrtfparams,
    const size_t SamplesToDo);
using HrtfMixerBlendFunc = void(*)(const al::span<const float> InSamples,
    const al::span<float2> AccumSamples, const uint IrSize, const HrtfFilter *oldparams,
    const MixHrtfFilter *newparams, const size_t SamplesToDo);

HrtfMixerFunc MixHrtfSamples{MixHrtf_<CTag>};
HrtfMixerBlendFunc MixHrtfBlendSamples{MixHrtfBlend_<CTag>};

inline MixerOutFunc SelectMixer()
{
#if HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return Mix_<NEONTag>;
#endif
#if HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return Mix_<SSETag>;
#endif
    return Mix_<CTag>;
}

inline MixerOneFunc SelectMixerOne()
{
#if HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return Mix_<NEONTag>;
#endif
#if HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return Mix_<SSETag>;
#endif
    return Mix_<CTag>;
}

inline HrtfMixerFunc SelectHrtfMixer()
{
#if HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return MixHrtf_<NEONTag>;
#endif
#if HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return MixHrtf_<SSETag>;
#endif
    return MixHrtf_<CTag>;
}

inline HrtfMixerBlendFunc SelectHrtfBlendMixer()
{
#if HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return MixHrtfBlend_<NEONTag>;
#endif
#if HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return MixHrtfBlend_<SSETag>;
#endif
    return MixHrtfBlend_<CTag>;
}

} // namespace

void Voice::InitMixer(std::optional<std::string> resopt)
{
    if(resopt)
    {
        struct ResamplerEntry {
            const std::string_view name;
            const Resampler resampler;
        };
        constexpr std::array ResamplerList{
            ResamplerEntry{"none"sv, Resampler::Point},
            ResamplerEntry{"point"sv, Resampler::Point},
            ResamplerEntry{"linear"sv, Resampler::Linear},
            ResamplerEntry{"spline"sv, Resampler::Spline},
            ResamplerEntry{"gaussian"sv, Resampler::Gaussian},
            ResamplerEntry{"bsinc12"sv, Resampler::BSinc12},
            ResamplerEntry{"fast_bsinc12"sv, Resampler::FastBSinc12},
            ResamplerEntry{"bsinc24"sv, Resampler::BSinc24},
            ResamplerEntry{"fast_bsinc24"sv, Resampler::FastBSinc24},
        };

        std::string_view resampler{*resopt};

        if(al::case_compare(resampler, "cubic"sv) == 0)
        {
            WARN("Resampler option \"%s\" is deprecated, using spline\n", resopt->c_str());
            resampler = "spline"sv;
        }
        else if(al::case_compare(resampler, "sinc4"sv) == 0
            || al::case_compare(resampler, "sinc8"sv) == 0)
        {
            WARN("Resampler option \"%s\" is deprecated, using gaussian\n", resopt->c_str());
            resampler = "gaussian"sv;
        }
        else if(al::case_compare(resampler, "bsinc"sv) == 0)
        {
            WARN("Resampler option \"%s\" is deprecated, using bsinc12\n", resopt->c_str());
            resampler = "bsinc12"sv;
        }

        auto iter = std::find_if(ResamplerList.begin(), ResamplerList.end(),
            [resampler](const ResamplerEntry &entry) -> bool
            { return al::case_compare(resampler, entry.name) == 0; });
        if(iter == ResamplerList.end())
            ERR("Invalid resampler: %s\n", resopt->c_str());
        else
            ResamplerDefault = iter->resampler;
    }

    MixSamplesOut = SelectMixer();
    MixSamplesOne = SelectMixerOne();
    MixHrtfBlendSamples = SelectHrtfBlendMixer();
    MixHrtfSamples = SelectHrtfMixer();
}
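
/* Illustrative only: a hypothetical call like Voice::InitMixer("bsinc24")
 * would set ResamplerDefault to Resampler::BSinc24 and install the SSE/NEON
 * mixer variants when the CPU reports support for them, while passing
 * std::nullopt leaves the default resampler untouched and only (re)selects
 * the mixer functions.
 */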

namespace {

/* IMA ADPCM Stepsize table */
constexpr std::array<int,89> IMAStep_size{{
       7,    8,    9,   10,   11,   12,   13,   14,   16,   17,   19,
      21,   23,   25,   28,   31,   34,   37,   41,   45,   50,   55,
      60,   66,   73,   80,   88,   97,  107,  118,  130,  143,  157,
     173,  190,  209,  230,  253,  279,  307,  337,  371,  408,  449,
     494,  544,  598,  658,  724,  796,  876,  963, 1060, 1166, 1282,
    1411, 1552, 1707, 1878, 2066, 2272, 2499, 2749, 3024, 3327, 3660,
    4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845, 8630, 9493,10442,
   11487,12635,13899,15289,16818,18500,20350,22358,24633,27086,29794,
   32767
}};

/* IMA4 ADPCM Codeword decode table */
constexpr std::array<int,16> IMA4Codeword{{
    1, 3, 5, 7, 9, 11, 13, 15,
   -1,-3,-5,-7,-9,-11,-13,-15,
}};

/* IMA4 ADPCM Step index adjust decode table */
constexpr std::array<int,16> IMA4Index_adjust{{
   -1,-1,-1,-1, 2, 4, 6, 8,
   -1,-1,-1,-1, 2, 4, 6, 8
}};

/* MSADPCM Adaption table */
constexpr std::array<int,16> MSADPCMAdaption{{
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
}};

/* MSADPCM Adaption Coefficient tables */
constexpr std::array MSADPCMAdaptionCoeff{
    std::array{256,    0},
    std::array{512, -256},
    std::array{  0,    0},
    std::array{192,   64},
    std::array{240,    0},
    std::array{460, -208},
    std::array{392, -232}
};
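
/* The adaption coefficients are fixed-point values scaled by 256 (256
 * represents 1.0); the MSADPCM decoder below divides the predicted sample by
 * 256 accordingly.
 */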

void SendSourceStoppedEvent(ContextBase *context, uint id)
{
    RingBuffer *ring{context->mAsyncEvents.get()};
    auto evt_vec = ring->getWriteVector();
    if(evt_vec[0].len < 1) return;

    auto &evt = InitAsyncEvent<AsyncSourceStateEvent>(evt_vec[0].buf);
    evt.mId = id;
    evt.mState = AsyncSrcState::Stop;

    ring->writeAdvance(1);
}

al::span<const float> DoFilters(BiquadFilter &lpfilter, BiquadFilter &hpfilter,
    const al::span<float,BufferLineSize> dst, const al::span<const float> src, int type)
{
    switch(type)
    {
    case AF_None:
        lpfilter.clear();
        hpfilter.clear();
        break;

    case AF_LowPass:
        lpfilter.process(src, dst);
        hpfilter.clear();
        return dst.first(src.size());
    case AF_HighPass:
        lpfilter.clear();
        hpfilter.process(src, dst);
        return dst.first(src.size());

    case AF_BandPass:
        DualBiquad{lpfilter, hpfilter}.process(src, dst);
        return dst.first(src.size());
    }
    return src;
}
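
/* Usage note: the span DoFilters returns is not always dst. With AF_None the
 * input is passed through untouched, e.g.
 *
 *   const auto out = DoFilters(lp, hp, FilterBuf, input, AF_None);
 *   // out.data() == input.data(); dst is left unmodified
 *
 * so callers must mix from the returned span rather than assuming dst was
 * written.
 */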

template<FmtType Type>
inline void LoadSamples(const al::span<float> dstSamples, const al::span<const std::byte> srcData,
    const size_t srcChan, const size_t srcOffset, const size_t srcStep,
    const size_t samplesPerBlock [[maybe_unused]]) noexcept
{
    using TypeTraits = al::FmtTypeTraits<Type>;
    using SampleType = typename TypeTraits::Type;
    static constexpr size_t sampleSize{sizeof(SampleType)};
    assert(srcChan < srcStep);
    auto converter = TypeTraits{};

    al::span<const SampleType> src{reinterpret_cast<const SampleType*>(srcData.data()),
        srcData.size()/sampleSize};
    auto ssrc = src.cbegin() + ptrdiff_t(srcOffset*srcStep);
    std::generate(dstSamples.begin(), dstSamples.end(), [&ssrc,srcChan,srcStep,converter]
    {
        auto ret = converter(ssrc[srcChan]);
        ssrc += ptrdiff_t(srcStep);
        return ret;
    });
}

template<>
inline void LoadSamples<FmtIMA4>(al::span<float> dstSamples, al::span<const std::byte> src,
    const size_t srcChan, const size_t srcOffset, const size_t srcStep,
    const size_t samplesPerBlock) noexcept
{
    static constexpr int MaxStepIndex{static_cast<int>(IMAStep_size.size()) - 1};

    assert(srcStep > 0 && srcStep <= 2);
    assert(srcChan < srcStep);
    assert(samplesPerBlock > 1);
    const size_t blockBytes{((samplesPerBlock-1)/2 + 4)*srcStep};

    /* Skip to the ADPCM block containing the srcOffset sample. */
    src = src.subspan(srcOffset/samplesPerBlock*blockBytes);
    /* Calculate how many samples need to be skipped in the block. */
    size_t skip{srcOffset % samplesPerBlock};

    /* NOTE: This could probably be optimized better. */
    auto dst = dstSamples.begin();
    while(dst != dstSamples.end())
    {
        /* Each IMA4 block starts with a signed 16-bit sample, and a signed
         * 16-bit table index. The table index needs to be clamped.
         */
        int sample{int(src[srcChan*4 + 0]) | (int(src[srcChan*4 + 1]) << 8)};
        int index{int(src[srcChan*4 + 2]) | (int(src[srcChan*4 + 3]) << 8)};
        auto nibbleData = src.subspan((srcStep+srcChan)*4);
        src = src.subspan(blockBytes);

        sample = (sample^0x8000) - 32768;
        index = std::clamp((index^0x8000) - 32768, 0, MaxStepIndex);

        if(skip == 0)
        {
            *dst = static_cast<float>(sample) / 32768.0f;
            if(++dst == dstSamples.end()) return;
        }
        else
            --skip;

        auto decode_sample = [&sample,&index](const uint8_t nibble)
        {
            sample += IMA4Codeword[nibble] * IMAStep_size[static_cast<uint>(index)] / 8;
            sample = std::clamp(sample, -32768, 32767);

            index += IMA4Index_adjust[nibble];
            index = std::clamp(index, 0, MaxStepIndex);

            return sample;
        };

        /* The rest of the block is arranged as a series of nibbles, contained
         * in 4 *bytes* per channel interleaved. So every 8 nibbles we need to
         * skip 4 bytes per channel to get the next nibbles for this channel.
         *
         * First, decode the samples that we need to skip in the block (will
         * always be less than the block size). They still need to be decoded,
         * despite being ignored, to maintain correct state for the remaining
         * samples.
         */
        static constexpr auto NibbleMask = std::byte{0xf};
        size_t nibbleOffset{0};
        const size_t startOffset{skip + 1};
        for(;skip;--skip)
        {
            const size_t byteShift{(nibbleOffset&1) * 4};
            const size_t wordOffset{(nibbleOffset>>1) & ~3_uz};
            const size_t byteOffset{wordOffset*srcStep + ((nibbleOffset>>1)&3u)};
            ++nibbleOffset;

            const auto nval = (nibbleData[byteOffset]>>byteShift) & NibbleMask;
            std::ignore = decode_sample(al::to_underlying(nval));
        }

        /* Second, decode the rest of the block and write to the output, until
         * the end of the block or the end of output.
         */
        const size_t todo{std::min(samplesPerBlock-startOffset, size_t(dstSamples.end()-dst))};
        dst = std::generate_n(dst, todo, [&]
        {
            const size_t byteShift{(nibbleOffset&1) * 4};
            const size_t wordOffset{(nibbleOffset>>1) & ~3_uz};
            const size_t byteOffset{wordOffset*srcStep + ((nibbleOffset>>1)&3u)};
            ++nibbleOffset;

            const auto nval = (nibbleData[byteOffset]>>byteShift) & NibbleMask;
            return static_cast<float>(decode_sample(al::to_underlying(nval))) / 32768.0f;
        });
    }
}
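
/* Worked example of the nibble addressing above, for a stereo block
 * (srcStep=2): nibbleOffset=9 gives byteShift = (9&1)*4 = 4,
 * wordOffset = (9>>1) & ~3 = 4, byteOffset = 4*2 + ((9>>1)&3) = 8, so the
 * sample comes from the high nibble of nibbleData[8] -- that is, after this
 * channel's first 4 data bytes and the other channel's interleaved 4 bytes.
 */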

template<>
inline void LoadSamples<FmtMSADPCM>(al::span<float> dstSamples, al::span<const std::byte> src,
    const size_t srcChan, const size_t srcOffset, const size_t srcStep,
    const size_t samplesPerBlock) noexcept
{
    assert(srcStep > 0 && srcStep <= 2);
    assert(srcChan < srcStep);
    assert(samplesPerBlock > 2);
    const size_t blockBytes{((samplesPerBlock-2)/2 + 7)*srcStep};

    src = src.subspan(srcOffset/samplesPerBlock*blockBytes);
    size_t skip{srcOffset % samplesPerBlock};

    auto dst = dstSamples.begin();
    while(dst != dstSamples.end())
    {
        /* Each MS ADPCM block starts with an 8-bit block predictor, used to
         * dictate how the two sample history values are mixed with the decoded
         * sample, and an initial signed 16-bit delta value which scales the
         * nibble sample value. This is followed by the two initial 16-bit
         * sample history values.
         */
        const uint8_t blockpred{std::min(uint8_t(src[srcChan]), uint8_t{6})};
        int delta{int(src[srcStep + 2*srcChan + 0]) | (int(src[srcStep + 2*srcChan + 1]) << 8)};

        auto sampleHistory = std::array{
            int(src[3*srcStep + 2*srcChan + 0]) | (int(src[3*srcStep + 2*srcChan + 1])<<8),
            int(src[5*srcStep + 2*srcChan + 0]) | (int(src[5*srcStep + 2*srcChan + 1])<<8)};
        const auto input = src.subspan(7*srcStep);
        src = src.subspan(blockBytes);

        const auto coeffs = al::span{MSADPCMAdaptionCoeff[blockpred]};
        delta = (delta^0x8000) - 32768;
        sampleHistory[0] = (sampleHistory[0]^0x8000) - 32768;
        sampleHistory[1] = (sampleHistory[1]^0x8000) - 32768;

        /* The second history sample is "older", so it's the first to be
         * written out.
         */
        if(skip == 0)
        {
            *dst = static_cast<float>(sampleHistory[1]) / 32768.0f;
            if(++dst == dstSamples.end()) return;
            *dst = static_cast<float>(sampleHistory[0]) / 32768.0f;
            if(++dst == dstSamples.end()) return;
        }
        else if(skip == 1)
        {
            --skip;
            *dst = static_cast<float>(sampleHistory[0]) / 32768.0f;
            if(++dst == dstSamples.end()) return;
        }
        else
            skip -= 2;

        auto decode_sample = [&sampleHistory,&delta,coeffs](const uint8_t nibble)
        {
            int pred{(sampleHistory[0]*coeffs[0] + sampleHistory[1]*coeffs[1]) / 256};
            pred += ((nibble^0x08) - 0x08) * delta;
            pred = std::clamp(pred, -32768, 32767);

            sampleHistory[1] = sampleHistory[0];
            sampleHistory[0] = pred;

            delta = (MSADPCMAdaption[nibble] * delta) / 256;
            delta = std::max(16, delta);

            return pred;
        };

        /* The rest of the block is a series of nibbles, interleaved per-
         * channel. First, skip samples.
         */
        static constexpr auto NibbleMask = std::byte{0xf};
        const size_t startOffset{skip + 2};
        size_t nibbleOffset{srcChan};
        for(;skip;--skip)
        {
            const size_t byteOffset{nibbleOffset>>1};
            const size_t byteShift{((nibbleOffset&1)^1) * 4};
            nibbleOffset += srcStep;

            const auto nval = (input[byteOffset]>>byteShift) & NibbleMask;
            std::ignore = decode_sample(al::to_underlying(nval));
        }

        /* Now decode the rest of the block, until the end of the block or the
         * dst buffer is filled.
         */
        const size_t todo{std::min(samplesPerBlock-startOffset, size_t(dstSamples.end()-dst))};
        dst = std::generate_n(dst, todo, [&]
        {
            const size_t byteOffset{nibbleOffset>>1};
            const size_t byteShift{((nibbleOffset&1)^1) * 4};
            nibbleOffset += srcStep;

            const auto nval = (input[byteOffset]>>byteShift) & NibbleMask;
            return static_cast<float>(decode_sample(al::to_underlying(nval))) / 32768.0f;
        });
    }
}
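
/* Worked example of the per-channel nibble interleave above, for a stereo
 * block (srcStep=2): channel 0 starts at nibbleOffset=0 and reads the high
 * nibble of input[0], then (offset 2) the high nibble of input[1], while
 * channel 1 starts at nibbleOffset=1 and reads the low nibbles of the same
 * bytes. Mono (srcStep=1) simply walks high nibble, low nibble through
 * consecutive bytes.
 */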

void LoadSamples(const al::span<float> dstSamples, const al::span<const std::byte> src,
    const size_t srcChan, const size_t srcOffset, const FmtType srcType, const size_t srcStep,
    const size_t samplesPerBlock) noexcept
{
#define HANDLE_FMT(T) case T:                                                 \
    LoadSamples<T>(dstSamples, src, srcChan, srcOffset, srcStep,              \
        samplesPerBlock);                                                     \
    break

    switch(srcType)
    {
    HANDLE_FMT(FmtUByte);
    HANDLE_FMT(FmtShort);
    HANDLE_FMT(FmtInt);
    HANDLE_FMT(FmtFloat);
    HANDLE_FMT(FmtDouble);
    HANDLE_FMT(FmtMulaw);
    HANDLE_FMT(FmtAlaw);
    HANDLE_FMT(FmtIMA4);
    HANDLE_FMT(FmtMSADPCM);
    }
#undef HANDLE_FMT
}

void LoadBufferStatic(VoiceBufferItem *buffer, VoiceBufferItem *bufferLoopItem,
    const size_t dataPosInt, const FmtType sampleType, const size_t srcChannel,
    const size_t srcStep, al::span<float> voiceSamples)
{
    if(!bufferLoopItem)
    {
        float lastSample{0.0f};
        /* Load what's left to play from the buffer */
        if(buffer->mSampleLen > dataPosInt) LIKELY
        {
            const size_t buffer_remaining{buffer->mSampleLen - dataPosInt};
            const size_t remaining{std::min(voiceSamples.size(), buffer_remaining)};
            LoadSamples(voiceSamples.first(remaining), buffer->mSamples, srcChannel, dataPosInt,
                sampleType, srcStep, buffer->mBlockAlign);
            lastSample = voiceSamples[remaining-1];
            voiceSamples = voiceSamples.subspan(remaining);
        }

        if(const size_t toFill{voiceSamples.size()})
            std::fill_n(voiceSamples.begin(), toFill, lastSample);
    }
    else
    {
        const size_t loopStart{buffer->mLoopStart};
        const size_t loopEnd{buffer->mLoopEnd};
        ASSUME(loopEnd > loopStart);

        const size_t intPos{(dataPosInt < loopEnd) ? dataPosInt
            : (((dataPosInt-loopStart)%(loopEnd-loopStart)) + loopStart)};

        /* Load what's left of this loop iteration */
        const size_t remaining{std::min(voiceSamples.size(), loopEnd-dataPosInt)};
        LoadSamples(voiceSamples.first(remaining), buffer->mSamples, srcChannel, intPos,
            sampleType, srcStep, buffer->mBlockAlign);
        voiceSamples = voiceSamples.subspan(remaining);

        /* Load repeats of the loop to fill the buffer. */
        const size_t loopSize{loopEnd - loopStart};
        while(const size_t toFill{std::min(voiceSamples.size(), loopSize)})
        {
            LoadSamples(voiceSamples.first(toFill), buffer->mSamples, srcChannel, loopStart,
                sampleType, srcStep, buffer->mBlockAlign);
            voiceSamples = voiceSamples.subspan(toFill);
        }
    }
}

void LoadBufferCallback(VoiceBufferItem *buffer, const size_t dataPosInt,
    const size_t numCallbackSamples, const FmtType sampleType, const size_t srcChannel,
    const size_t srcStep, al::span<float> voiceSamples)
{
    float lastSample{0.0f};
    if(numCallbackSamples > dataPosInt) LIKELY
    {
        const size_t remaining{std::min(voiceSamples.size(), numCallbackSamples-dataPosInt)};
        LoadSamples(voiceSamples.first(remaining), buffer->mSamples, srcChannel, dataPosInt,
            sampleType, srcStep, buffer->mBlockAlign);
        lastSample = voiceSamples[remaining-1];
        voiceSamples = voiceSamples.subspan(remaining);
    }

    if(const size_t toFill{voiceSamples.size()})
        std::fill_n(voiceSamples.begin(), toFill, lastSample);
}

void LoadBufferQueue(VoiceBufferItem *buffer, VoiceBufferItem *bufferLoopItem,
    size_t dataPosInt, const FmtType sampleType, const size_t srcChannel,
    const size_t srcStep, al::span<float> voiceSamples)
{
    float lastSample{0.0f};
    /* Crawl the buffer queue to fill in the temp buffer */
    while(buffer && !voiceSamples.empty())
    {
        if(dataPosInt >= buffer->mSampleLen)
        {
            dataPosInt -= buffer->mSampleLen;
            buffer = buffer->mNext.load(std::memory_order_acquire);
            if(!buffer) buffer = bufferLoopItem;
            continue;
        }

        const size_t remaining{std::min(voiceSamples.size(), buffer->mSampleLen-dataPosInt)};
        LoadSamples(voiceSamples.first(remaining), buffer->mSamples, srcChannel, dataPosInt,
            sampleType, srcStep, buffer->mBlockAlign);

        lastSample = voiceSamples[remaining-1];
        voiceSamples = voiceSamples.subspan(remaining);
        if(voiceSamples.empty())
            break;

        dataPosInt = 0;
        buffer = buffer->mNext.load(std::memory_order_acquire);
        if(!buffer) buffer = bufferLoopItem;
    }
    if(const size_t toFill{voiceSamples.size()})
        std::fill_n(voiceSamples.begin(), toFill, lastSample);
}

void DoHrtfMix(const al::span<const float> samples, DirectParams &parms, const float TargetGain,
    const size_t Counter, size_t OutPos, const bool IsPlaying, DeviceBase *Device)
{
    const uint IrSize{Device->mIrSize};
    const auto HrtfSamples = al::span{Device->ExtraSampleData};
    const auto AccumSamples = al::span{Device->HrtfAccumData};

    /* Copy the HRTF history and new input samples into a temp buffer. */
    auto src_iter = std::copy(parms.Hrtf.History.begin(), parms.Hrtf.History.end(),
        HrtfSamples.begin());
    std::copy_n(samples.begin(), samples.size(), src_iter);
    /* Copy the last used samples back into the history buffer for later. */
    if(IsPlaying) LIKELY
    {
        const auto endsamples = HrtfSamples.subspan(samples.size(), parms.Hrtf.History.size());
        std::copy_n(endsamples.cbegin(), endsamples.size(), parms.Hrtf.History.begin());
    }

    /* If fading and this is the first mixing pass, fade between the IRs. */
    size_t fademix{0};
    if(Counter && OutPos == 0)
    {
        fademix = std::min(samples.size(), Counter);

        float gain{TargetGain};

        /* The new coefficients need to fade in completely since they're
         * replacing the old ones. To keep the gain fading consistent,
         * interpolate between the old and new target gains given how much of
         * the fade time this mix handles.
         */
        if(Counter > fademix)
        {
            const float a{static_cast<float>(fademix) / static_cast<float>(Counter)};
            gain = lerpf(parms.Hrtf.Old.Gain, TargetGain, a);
        }

        MixHrtfFilter hrtfparams{
            parms.Hrtf.Target.Coeffs,
            parms.Hrtf.Target.Delay,
            0.0f, gain / static_cast<float>(fademix)};
        MixHrtfBlendSamples(HrtfSamples, AccumSamples.subspan(OutPos), IrSize, &parms.Hrtf.Old,
            &hrtfparams, fademix);

        /* Update the old parameters with the result. */
        parms.Hrtf.Old = parms.Hrtf.Target;
        parms.Hrtf.Old.Gain = gain;
        OutPos += fademix;
    }

    if(fademix < samples.size())
    {
        const size_t todo{samples.size() - fademix};
        float gain{TargetGain};

        /* Interpolate the target gain if the gain fading lasts longer than
         * this mix.
         */
        if(Counter > samples.size())
        {
            const float a{static_cast<float>(todo) / static_cast<float>(Counter-fademix)};
            gain = lerpf(parms.Hrtf.Old.Gain, TargetGain, a);
        }

        MixHrtfFilter hrtfparams{
            parms.Hrtf.Target.Coeffs,
            parms.Hrtf.Target.Delay,
            parms.Hrtf.Old.Gain,
            (gain - parms.Hrtf.Old.Gain) / static_cast<float>(todo)};
        MixHrtfSamples(HrtfSamples.subspan(fademix), AccumSamples.subspan(OutPos), IrSize,
            &hrtfparams, todo);

        /* Store the now-current gain for next time. */
        parms.Hrtf.Old.Gain = gain;
    }
}
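
/* Worked example of the blend above: with Counter=128, OutPos=0 and a
 * 64-sample mix, fademix=64 and a=0.5, so gain lands halfway between
 * Hrtf.Old.Gain and TargetGain. The blend pass steps the new IR's gain from 0
 * up to that value over the 64 samples (step = gain/fademix) while the old
 * IR's contribution fades out, and Hrtf.Old.Gain is left at the halfway value
 * for the next update to continue from.
 */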

void DoNfcMix(const al::span<const float> samples, al::span<FloatBufferLine> OutBuffer,
    DirectParams &parms, const al::span<const float,MaxOutputChannels> OutGains,
    const uint Counter, const uint OutPos, DeviceBase *Device)
{
    using FilterProc = void (NfcFilter::*)(const al::span<const float>, const al::span<float>);
    static constexpr std::array<FilterProc,MaxAmbiOrder+1> NfcProcess{{
        nullptr, &NfcFilter::process1, &NfcFilter::process2, &NfcFilter::process3}};

    MixSamples(samples, al::span{OutBuffer[0]}.subspan(OutPos), parms.Gains.Current[0],
        OutGains[0], Counter);
    OutBuffer = OutBuffer.subspan(1);
    auto CurrentGains = al::span{parms.Gains.Current}.subspan(1);
    auto TargetGains = OutGains.subspan(1);

    const auto nfcsamples = al::span{Device->ExtraSampleData}.first(samples.size());
    size_t order{1};
    while(const size_t chancount{Device->NumChannelsPerOrder[order]})
    {
        (parms.NFCtrlFilter.*NfcProcess[order])(samples, nfcsamples);
        MixSamples(nfcsamples, OutBuffer.first(chancount), CurrentGains, TargetGains, Counter,
            OutPos);
        if(++order == MaxAmbiOrder+1)
            break;
        OutBuffer = OutBuffer.subspan(chancount);
        CurrentGains = CurrentGains.subspan(chancount);
        TargetGains = TargetGains.subspan(chancount);
    }
}

} // namespace

void Voice::mix(const State vstate, ContextBase *Context, const nanoseconds deviceTime,
    const uint SamplesToDo)
{
    static constexpr std::array<float,MaxOutputChannels> SilentTarget{};

    ASSUME(SamplesToDo > 0);

    DeviceBase *Device{Context->mDevice};
    const uint NumSends{Device->NumAuxSends};

    /* Get voice info */
    int DataPosInt{mPosition.load(std::memory_order_relaxed)};
    uint DataPosFrac{mPositionFrac.load(std::memory_order_relaxed)};
    VoiceBufferItem *BufferListItem{mCurrentBuffer.load(std::memory_order_relaxed)};
    VoiceBufferItem *BufferLoopItem{mLoopBuffer.load(std::memory_order_relaxed)};
    const uint increment{mStep};
    if(increment < 1) UNLIKELY
    {
        /* If the voice is supposed to be stopping but can't be mixed, just
         * stop it before bailing.
         */
        if(vstate == Stopping)
            mPlayState.store(Stopped, std::memory_order_release);
        return;
    }

    /* If the static voice's current position is beyond the buffer loop end
     * position, disable looping.
     */
    if(mFlags.test(VoiceIsStatic) && BufferLoopItem)
    {
        if(DataPosInt >= 0 && static_cast<uint>(DataPosInt) >= BufferListItem->mLoopEnd)
            BufferLoopItem = nullptr;
    }

    uint OutPos{0u};

    /* Check if we're doing a delayed start, and we start in this update. */
    if(mStartTime > deviceTime) UNLIKELY
    {
        /* If the voice is supposed to be stopping but hasn't actually started
         * yet, make sure it's stopped.
         */
        if(vstate == Stopping)
        {
            mPlayState.store(Stopped, std::memory_order_release);
            return;
        }

        /* If the start time is too far ahead, don't bother. */
        auto diff = mStartTime - deviceTime;
        if(diff >= seconds{1})
            return;

        /* Get the number of samples ahead of the current time that output
         * should start at. Skip this update if it's beyond the output sample
         * count.
         */
        OutPos = static_cast<uint>(round<seconds>(diff * Device->Frequency).count());
        if(OutPos >= SamplesToDo) return;
    }

    /* Calculate the number of samples to mix, and the number of (resampled)
     * samples that need to be loaded (mixing samples and decoder padding).
     */
    const uint samplesToMix{SamplesToDo - OutPos};
    const uint samplesToLoad{samplesToMix + mDecoderPadding};

    /* Get a span of pointers to hold the floating point, deinterlaced,
     * resampled buffer data to be mixed.
     */
    auto SamplePointers = std::array<float*,DeviceBase::MixerChannelsMax>{};
    const auto MixingSamples = al::span{SamplePointers}.first(mChans.size());
    {
        const uint channelStep{(samplesToLoad+3u)&~3u};
        auto base = Device->mSampleData.end() - MixingSamples.size()*channelStep;
        std::generate(MixingSamples.begin(), MixingSamples.end(), [&base,channelStep]
        {
            const auto ret = base;
            base += channelStep;
            return al::to_address(ret);
        });
    }

    /* UHJ2 and SuperStereo only have 2 buffer channels, but 3 mixing channels
     * (3rd channel is generated from decoding). MonoDup only has 1 buffer
     * channel, but 2 mixing channels (2nd channel is just duplicated).
     */
    const size_t realChannels{(mFmtChannels == FmtMonoDup) ? 1u
        : (mFmtChannels == FmtUHJ2 || mFmtChannels == FmtSuperStereo) ? 2u
        : MixingSamples.size()};
    for(size_t chan{0};chan < realChannels;++chan)
    {
        static constexpr uint ResBufSize{std::tuple_size_v<decltype(DeviceBase::mResampleData)>};
        static constexpr uint srcSizeMax{ResBufSize - MaxResamplerEdge};

        const al::span prevSamples{mPrevSamples[chan]};
        std::copy(prevSamples.cbegin(), prevSamples.cend(), Device->mResampleData.begin());
        const auto resampleBuffer = al::span{Device->mResampleData}.subspan<MaxResamplerEdge>();
        int intPos{DataPosInt};
        uint fracPos{DataPosFrac};

        /* Load samples for this channel from the available buffer(s), with
         * resampling.
         */
        for(uint samplesLoaded{0};samplesLoaded < samplesToLoad;)
        {
            /* Calculate the number of dst samples that can be loaded this
             * iteration, given the available resampler buffer size, and the
             * number of src samples that are needed to load it.
             */
            auto calc_buffer_sizes = [fracPos,increment](uint dstBufferSize)
            {
                /* If ext=true, calculate the last written dst pos from the dst
                 * count, convert to the last read src pos, then add one to get
                 * the src count.
                 *
                 * If ext=false, convert the dst count to src count directly.
                 *
                 * Without this, the src count could be short by one when
                 * increment < 1.0, or not have a full src at the end when
                 * increment > 1.0.
                 */
                const bool ext{increment <= MixerFracOne};
                uint64_t dataSize64{dstBufferSize - ext};
                dataSize64 = (dataSize64*increment + fracPos) >> MixerFracBits;
                /* Also include resampler padding. */
                dataSize64 += ext + MaxResamplerEdge;

                if(dataSize64 <= srcSizeMax)
                    return std::array{dstBufferSize, static_cast<uint>(dataSize64)};

                /* If the source size got saturated, we can't fill the desired
                 * dst size. Figure out how many dst samples we can fill.
                 */
                dataSize64 = srcSizeMax - MaxResamplerEdge;
                dataSize64 = ((dataSize64<<MixerFracBits) - fracPos) / increment;
                if(dataSize64 < dstBufferSize)
                {
                    /* Some resamplers require the destination being 16-byte
                     * aligned, so limit to a multiple of 4 samples to maintain
                     * alignment if we need to do another iteration after this.
                     */
                    dstBufferSize = static_cast<uint>(dataSize64) & ~3u;
                }
                return std::array{dstBufferSize, srcSizeMax};
            };
            const auto [dstBufferSize, srcBufferSize] = calc_buffer_sizes(
                samplesToLoad - samplesLoaded);
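            /* For example, at half pitch (increment == MixerFracOne/2, so
             * ext=true) with fracPos at half a sample, filling 1024 dst
             * samples reads src indices up to (1023*increment + fracPos) >>
             * MixerFracBits = 512, i.e. 513 src samples plus MaxResamplerEdge
             * padding; converting the dst count directly would give only 512
             * and come up one short.
             */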

            size_t srcSampleDelay{0};
            if(intPos < 0) UNLIKELY
            {
                /* If the current position is negative, there's that many
                 * silent samples to load before using the buffer.
                 */
                srcSampleDelay = static_cast<uint>(-intPos);
                if(srcSampleDelay >= srcBufferSize)
                {
                    /* If the number of silent source samples exceeds the
                     * number to load, the output will be silent.
                     */
                    std::fill_n(MixingSamples[chan]+samplesLoaded, dstBufferSize, 0.0f);
                    std::fill_n(resampleBuffer.begin(), srcBufferSize, 0.0f);
                    goto skip_resample;
                }

                std::fill_n(resampleBuffer.begin(), srcSampleDelay, 0.0f);
            }

            /* Load the necessary samples from the given buffer(s). */
            if(!BufferListItem) UNLIKELY
            {
                const uint avail{std::min(srcBufferSize, MaxResamplerEdge)};
                const uint tofill{std::max(srcBufferSize, MaxResamplerEdge)};
                const auto srcbuf = resampleBuffer.first(tofill);

                /* When loading from a voice that ended prematurely, only take
                 * the samples that get closest to 0 amplitude. This helps
                 * certain sounds fade out better.
                 */
                auto srciter = std::min_element(srcbuf.begin(), srcbuf.begin()+ptrdiff_t(avail),
                    [](const float l, const float r) { return std::abs(l) < std::abs(r); });

                std::fill(srciter+1, srcbuf.end(), *srciter);
            }
            else if(mFlags.test(VoiceIsStatic))
            {
                const auto uintPos = static_cast<uint>(std::max(intPos, 0));
                const auto bufferSamples = resampleBuffer.subspan(srcSampleDelay,
                    srcBufferSize-srcSampleDelay);
                LoadBufferStatic(BufferListItem, BufferLoopItem, uintPos, mFmtType, chan,
                    mFrameStep, bufferSamples);
            }
            else if(mFlags.test(VoiceIsCallback))
            {
                const auto uintPos = static_cast<uint>(std::max(intPos, 0));
                const uint callbackBase{mCallbackBlockBase * mSamplesPerBlock};
                const size_t bufferOffset{uintPos - callbackBase};
                const size_t needSamples{bufferOffset + srcBufferSize - srcSampleDelay};
                const size_t needBlocks{(needSamples + mSamplesPerBlock-1) / mSamplesPerBlock};
                if(!mFlags.test(VoiceCallbackStopped) && needBlocks > mNumCallbackBlocks)
                {
                    const size_t byteOffset{mNumCallbackBlocks*size_t{mBytesPerBlock}};
                    const size_t needBytes{(needBlocks-mNumCallbackBlocks)*size_t{mBytesPerBlock}};

                    const int gotBytes{BufferListItem->mCallback(BufferListItem->mUserData,
                        &BufferListItem->mSamples[byteOffset], static_cast<int>(needBytes))};
                    if(gotBytes < 0)
                        mFlags.set(VoiceCallbackStopped);
                    else if(static_cast<uint>(gotBytes) < needBytes)
                    {
                        mFlags.set(VoiceCallbackStopped);
                        mNumCallbackBlocks += static_cast<uint>(gotBytes) / mBytesPerBlock;
                    }
                    else
                        mNumCallbackBlocks = static_cast<uint>(needBlocks);
                }
                const size_t numSamples{size_t{mNumCallbackBlocks} * mSamplesPerBlock};
                const auto bufferSamples = resampleBuffer.subspan(srcSampleDelay,
                    srcBufferSize-srcSampleDelay);
                LoadBufferCallback(BufferListItem, bufferOffset, numSamples, mFmtType, chan,
                    mFrameStep, bufferSamples);
            }
            else
            {
                const auto uintPos = static_cast<uint>(std::max(intPos, 0));
                const auto bufferSamples = resampleBuffer.subspan(srcSampleDelay,
                    srcBufferSize-srcSampleDelay);
                LoadBufferQueue(BufferListItem, BufferLoopItem, uintPos, mFmtType, chan,
                    mFrameStep, bufferSamples);
            }

            /* If there's a matching sample step and no phase offset, use a
             * simple copy for resampling.
             */
            if(increment == MixerFracOne && fracPos == 0)
                std::copy_n(resampleBuffer.cbegin(), dstBufferSize,
                    MixingSamples[chan]+samplesLoaded);
            else
                mResampler(&mResampleState, Device->mResampleData, fracPos, increment,
                    {MixingSamples[chan]+samplesLoaded, dstBufferSize});

            /* Store the last source samples used for next time. */
            if(vstate == Playing) LIKELY
            {
                /* Only store samples for the end of the mix, excluding what
                 * gets loaded for decoder padding.
                 */
                const uint loadEnd{samplesLoaded + dstBufferSize};
                if(samplesToMix > samplesLoaded && samplesToMix <= loadEnd) LIKELY
                {
                    const size_t dstOffset{samplesToMix - samplesLoaded};
                    const size_t srcOffset{(dstOffset*increment + fracPos) >> MixerFracBits};
                    std::copy_n(Device->mResampleData.cbegin()+srcOffset, prevSamples.size(),
                        prevSamples.begin());
                }
            }

        skip_resample:
            samplesLoaded += dstBufferSize;
            if(samplesLoaded < samplesToLoad)
            {
                fracPos += dstBufferSize*increment;
                const uint srcOffset{fracPos >> MixerFracBits};
                fracPos &= MixerFracMask;
                intPos += static_cast<int>(srcOffset);

                /* If more samples need to be loaded, copy the back of the
                 * resampleBuffer to the front to reuse it. prevSamples isn't
                 * reliable since it's only updated for the end of the mix.
                 */
                std::copy_n(Device->mResampleData.cbegin()+srcOffset, MaxResamplerPadding,
                    Device->mResampleData.begin());
            }
        }
    }
    if(mFmtChannels == FmtMonoDup)
    {
        /* NOTE: a mono source shouldn't have a decoder or the VoiceIsAmbisonic
         * flag, so aliasing instead of copying to the second channel shouldn't
         * be a problem.
         */
        MixingSamples[1] = MixingSamples[0];
    }
    else for(auto &samples : MixingSamples.subspan(realChannels))
        std::fill_n(samples, samplesToLoad, 0.0f);

    if(mDecoder)
        mDecoder->decode(MixingSamples, samplesToMix, (vstate==Playing));

    if(mFlags.test(VoiceIsAmbisonic))
    {
        auto voiceSamples = MixingSamples.begin();
        for(auto &chandata : mChans)
        {
            chandata.mAmbiSplitter.processScale({*voiceSamples, samplesToMix},
                chandata.mAmbiHFScale, chandata.mAmbiLFScale);
            ++voiceSamples;
        }
    }

    const uint Counter{mFlags.test(VoiceIsFading) ? std::min(samplesToMix, 64u) : 0u};
    if(!Counter)
    {
        /* No fading, just overwrite the old/current params. */
        for(auto &chandata : mChans)
        {
            {
                DirectParams &parms = chandata.mDryParams;
                if(!mFlags.test(VoiceHasHrtf))
                    parms.Gains.Current = parms.Gains.Target;
                else
                    parms.Hrtf.Old = parms.Hrtf.Target;
            }
            for(uint send{0};send < NumSends;++send)
            {
                if(mSend[send].Buffer.empty())
                    continue;

                SendParams &parms = chandata.mWetParams[send];
                parms.Gains.Current = parms.Gains.Target;
            }
        }
    }

    auto voiceSamples = MixingSamples.begin();
    for(auto &chandata : mChans)
    {
        /* Now filter and mix to the appropriate outputs. */
        const al::span<float,BufferLineSize> FilterBuf{Device->FilteredData};
        {
            DirectParams &parms = chandata.mDryParams;
            const auto samples = DoFilters(parms.LowPass, parms.HighPass, FilterBuf,
                {*voiceSamples, samplesToMix}, mDirect.FilterType);

            if(mFlags.test(VoiceHasHrtf))
            {
                const float TargetGain{parms.Hrtf.Target.Gain * float(vstate == Playing)};
                DoHrtfMix(samples, parms, TargetGain, Counter, OutPos, (vstate == Playing),
                    Device);
            }
            else
            {
                const auto TargetGains = (vstate == Playing) ? al::span{parms.Gains.Target}
                    : al::span{SilentTarget};
                if(mFlags.test(VoiceHasNfc))
                    DoNfcMix(samples, mDirect.Buffer, parms, TargetGains, Counter, OutPos, Device);
                else
                    MixSamples(samples, mDirect.Buffer, parms.Gains.Current, TargetGains, Counter,
                        OutPos);
            }
        }

        for(uint send{0};send < NumSends;++send)
        {
            if(mSend[send].Buffer.empty())
                continue;

            SendParams &parms = chandata.mWetParams[send];
            const auto samples = DoFilters(parms.LowPass, parms.HighPass, FilterBuf,
                {*voiceSamples, samplesToMix}, mSend[send].FilterType);

            const auto TargetGains = (vstate == Playing) ? al::span{parms.Gains.Target}
                : al::span{SilentTarget};
            MixSamples(samples, mSend[send].Buffer, parms.Gains.Current, TargetGains, Counter,
                OutPos);
        }

        ++voiceSamples;
    }

    mFlags.set(VoiceIsFading);

    /* Don't update positions and buffers if we were stopping. */
    if(vstate == Stopping) UNLIKELY
    {
        mPlayState.store(Stopped, std::memory_order_release);
        return;
    }

    /* Update voice positions and buffers as needed. */
    DataPosFrac += increment*samplesToMix;
    DataPosInt += static_cast<int>(DataPosFrac>>MixerFracBits);
    DataPosFrac &= MixerFracMask;
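    /* E.g. at a pitch of 2.0 (increment == 2*MixerFracOne), mixing 512
     * samples advances the integer read position by 1024 source frames; any
     * sub-sample remainder stays in DataPosFrac for the next update.
     */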

    uint buffers_done{0u};
    if(BufferListItem && DataPosInt > 0) LIKELY
    {
        if(mFlags.test(VoiceIsStatic))
        {
            if(BufferLoopItem)
            {
                /* Handle looping static source */
                const uint LoopStart{BufferListItem->mLoopStart};
                const uint LoopEnd{BufferListItem->mLoopEnd};
                uint DataPosUInt{static_cast<uint>(DataPosInt)};
                if(DataPosUInt >= LoopEnd)
                {
                    assert(LoopEnd > LoopStart);
                    DataPosUInt = ((DataPosUInt-LoopStart)%(LoopEnd-LoopStart)) + LoopStart;
                    DataPosInt = static_cast<int>(DataPosUInt);
                }
            }
            else
            {
                /* Handle non-looping static source */
                if(static_cast<uint>(DataPosInt) >= BufferListItem->mSampleLen)
                    BufferListItem = nullptr;
            }
        }
        else if(mFlags.test(VoiceIsCallback))
        {
            /* Handle callback buffer source */
            const uint currentBlock{static_cast<uint>(DataPosInt) / mSamplesPerBlock};
            const uint blocksDone{currentBlock - mCallbackBlockBase};
            if(blocksDone < mNumCallbackBlocks)
            {
                const size_t byteOffset{blocksDone*size_t{mBytesPerBlock}};
                const size_t byteEnd{mNumCallbackBlocks*size_t{mBytesPerBlock}};
                const al::span data{BufferListItem->mSamples};
                std::copy(data.cbegin()+ptrdiff_t(byteOffset), data.cbegin()+ptrdiff_t(byteEnd),
                    data.begin());
                mNumCallbackBlocks -= blocksDone;
                mCallbackBlockBase += blocksDone;
            }
            else
            {
                BufferListItem = nullptr;
                mNumCallbackBlocks = 0;
                mCallbackBlockBase += blocksDone;
            }
        }
        else
        {
            /* Handle streaming source */
            do {
                if(BufferListItem->mSampleLen > static_cast<uint>(DataPosInt))
                    break;

                DataPosInt -= static_cast<int>(BufferListItem->mSampleLen);

                ++buffers_done;
                BufferListItem = BufferListItem->mNext.load(std::memory_order_relaxed);
                if(!BufferListItem) BufferListItem = BufferLoopItem;
            } while(BufferListItem);
        }
    }

    /* Capture the source ID in case it gets reset for stopping. */
    const uint SourceID{mSourceID.load(std::memory_order_relaxed)};

    /* Update voice info */
    mPosition.store(DataPosInt, std::memory_order_relaxed);
    mPositionFrac.store(DataPosFrac, std::memory_order_relaxed);
    mCurrentBuffer.store(BufferListItem, std::memory_order_relaxed);
    if(!BufferListItem)
    {
        mLoopBuffer.store(nullptr, std::memory_order_relaxed);
        mSourceID.store(0u, std::memory_order_relaxed);
    }
    std::atomic_thread_fence(std::memory_order_release);

    /* Send any events now, after the position/buffer info was updated. */
    const auto enabledevt = Context->mEnabledEvts.load(std::memory_order_acquire);
    if(buffers_done > 0 && enabledevt.test(al::to_underlying(AsyncEnableBits::BufferCompleted)))
    {
        RingBuffer *ring{Context->mAsyncEvents.get()};
        auto evt_vec = ring->getWriteVector();
        if(evt_vec[0].len > 0)
        {
            auto &evt = InitAsyncEvent<AsyncBufferCompleteEvent>(evt_vec[0].buf);
            evt.mId = SourceID;
            evt.mCount = buffers_done;
            ring->writeAdvance(1);
        }
    }

    if(!BufferListItem)
    {
        /* If the voice just ended, set it to Stopping so the next render
         * ensures any residual noise fades to 0 amplitude.
         */
        mPlayState.store(Stopping, std::memory_order_release);
        if(enabledevt.test(al::to_underlying(AsyncEnableBits::SourceState)))
            SendSourceStoppedEvent(Context, SourceID);
    }
}

void Voice::prepare(DeviceBase *device)
{
    /* Even if storing really high order ambisonics, we only mix channels for
     * orders up to the device order. The rest are simply dropped.
     */
    uint num_channels{(mFmtChannels == FmtMonoDup) ? 2
        : (mFmtChannels == FmtUHJ2 || mFmtChannels == FmtSuperStereo) ? 3
        : ChannelsFromFmt(mFmtChannels, std::min(mAmbiOrder, device->mAmbiOrder))};
    if(num_channels > device->MixerChannelsMax) UNLIKELY
    {
        ERR("Unexpected channel count: %u (limit: %zu, %s : %d)\n", num_channels,
            device->MixerChannelsMax, NameFromFormat(mFmtChannels), mAmbiOrder);
        num_channels = device->MixerChannelsMax;
    }
    if(mChans.capacity() > 2 && num_channels < mChans.capacity())
    {
        decltype(mChans){}.swap(mChans);
        decltype(mPrevSamples){}.swap(mPrevSamples);
    }
    mChans.reserve(std::max(2u, num_channels));
    mChans.resize(num_channels);
    mPrevSamples.reserve(std::max(2u, num_channels));
    mPrevSamples.resize(num_channels);

    mDecoder = nullptr;
    mDecoderPadding = 0;
    if(mFmtChannels == FmtSuperStereo)
    {
        switch(UhjDecodeQuality)
        {
        case UhjQualityType::IIR:
            mDecoder = std::make_unique<UhjStereoDecoderIIR>();
            mDecoderPadding = UhjStereoDecoderIIR::sInputPadding;
            break;
        case UhjQualityType::FIR256:
            mDecoder = std::make_unique<UhjStereoDecoder<UhjLength256>>();
            mDecoderPadding = UhjStereoDecoder<UhjLength256>::sInputPadding;
            break;
        case UhjQualityType::FIR512:
            mDecoder = std::make_unique<UhjStereoDecoder<UhjLength512>>();
            mDecoderPadding = UhjStereoDecoder<UhjLength512>::sInputPadding;
            break;
        }
    }
    else if(IsUHJ(mFmtChannels))
    {
        switch(UhjDecodeQuality)
        {
        case UhjQualityType::IIR:
            mDecoder = std::make_unique<UhjDecoderIIR>();
            mDecoderPadding = UhjDecoderIIR::sInputPadding;
            break;
        case UhjQualityType::FIR256:
            mDecoder = std::make_unique<UhjDecoder<UhjLength256>>();
            mDecoderPadding = UhjDecoder<UhjLength256>::sInputPadding;
            break;
        case UhjQualityType::FIR512:
            mDecoder = std::make_unique<UhjDecoder<UhjLength512>>();
            mDecoderPadding = UhjDecoder<UhjLength512>::sInputPadding;
            break;
        }
    }

    /* Clear the stepping value explicitly so the mixer knows not to mix this
     * until the update gets applied.
     */
    mStep = 0;

    /* Make sure the sample history is cleared. */
    std::fill(mPrevSamples.begin(), mPrevSamples.end(), HistoryLine{});

    if(mFmtChannels == FmtUHJ2 && !device->mUhjEncoder)
    {
        /* 2-channel UHJ needs different shelf filters. However, we can't just
         * use different shelf filters after mixing it, given any old speaker
         * setup the user has. To make this work, we apply the expected shelf
         * filters for decoding UHJ2 to quad (only needs LF scaling), and act
         * as if those 4 quad channels are encoded right back into B-Format.
         *
         * This isn't perfect, but without an entirely separate and limited
         * UHJ2 path, it's better than nothing.
         *
         * Note this isn't needed with UHJ output (UHJ2->B-Format->UHJ2 is
         * identity, so don't mess with it).
         */
        const BandSplitter splitter{device->mXOverFreq / static_cast<float>(device->Frequency)};
        for(auto &chandata : mChans)
        {
            chandata.mAmbiHFScale = 1.0f;
            chandata.mAmbiLFScale = 1.0f;
            chandata.mAmbiSplitter = splitter;
            chandata.mDryParams = DirectParams{};
            chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
            std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
        }
        mChans[0].mAmbiLFScale = DecoderBase::sWLFScale;
        mChans[1].mAmbiLFScale = DecoderBase::sXYLFScale;
        mChans[2].mAmbiLFScale = DecoderBase::sXYLFScale;
        mFlags.set(VoiceIsAmbisonic);
    }
    /* Don't need to set the VoiceIsAmbisonic flag if the device is not higher
     * order than the voice. No HF scaling is necessary to mix it.
     */
    else if(mAmbiOrder && device->mAmbiOrder > mAmbiOrder)
    {
        auto OrdersSpan = Is2DAmbisonic(mFmtChannels)
            ? al::span<const uint8_t>{AmbiIndex::OrderFrom2DChannel}
            : al::span<const uint8_t>{AmbiIndex::OrderFromChannel};
        auto OrderFromChan = OrdersSpan.cbegin();
        const auto scales = AmbiScale::GetHFOrderScales(mAmbiOrder, device->mAmbiOrder,
            device->m2DMixing);

        const BandSplitter splitter{device->mXOverFreq / static_cast<float>(device->Frequency)};
        for(auto &chandata : mChans)
        {
            chandata.mAmbiHFScale = scales[*(OrderFromChan++)];
            chandata.mAmbiLFScale = 1.0f;
            chandata.mAmbiSplitter = splitter;
            chandata.mDryParams = DirectParams{};
            chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
            std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
        }
        mFlags.set(VoiceIsAmbisonic);
    }
    else
    {
        for(auto &chandata : mChans)
        {
            chandata.mDryParams = DirectParams{};
            chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
            std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
        }
        mFlags.reset(VoiceIsAmbisonic);
    }
}