Finalize AL_SOFT_UHJ
[openal-soft.git] / alc / alu.cpp
blob ef885152ee044409c2bced35803791e948857f5f
1 /**
2 * OpenAL cross platform audio library
3 * Copyright (C) 1999-2007 by authors.
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Library General Public License for more details.
14 * You should have received a copy of the GNU Library General Public
15 * License along with this library; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 * Or go to http://www.gnu.org/copyleft/lgpl.html
21 #include "config.h"
23 #include "alu.h"
25 #include <algorithm>
26 #include <array>
27 #include <atomic>
28 #include <cassert>
29 #include <chrono>
30 #include <climits>
31 #include <cstdarg>
32 #include <cstdio>
33 #include <cstdlib>
34 #include <functional>
35 #include <iterator>
36 #include <limits>
37 #include <memory>
38 #include <new>
39 #include <stdint.h>
40 #include <utility>
42 #include "almalloc.h"
43 #include "alnumbers.h"
44 #include "alnumeric.h"
45 #include "alspan.h"
46 #include "alstring.h"
47 #include "atomic.h"
48 #include "core/ambidefs.h"
49 #include "core/async_event.h"
50 #include "core/bformatdec.h"
51 #include "core/bs2b.h"
52 #include "core/bsinc_defs.h"
53 #include "core/bsinc_tables.h"
54 #include "core/bufferline.h"
55 #include "core/buffer_storage.h"
56 #include "core/context.h"
57 #include "core/cpu_caps.h"
58 #include "core/devformat.h"
59 #include "core/device.h"
60 #include "core/effects/base.h"
61 #include "core/effectslot.h"
62 #include "core/filters/biquad.h"
63 #include "core/filters/nfc.h"
64 #include "core/fpu_ctrl.h"
65 #include "core/hrtf.h"
66 #include "core/mastering.h"
67 #include "core/mixer.h"
68 #include "core/mixer/defs.h"
69 #include "core/mixer/hrtfdefs.h"
70 #include "core/resampler_limits.h"
71 #include "core/uhjfilter.h"
72 #include "core/voice.h"
73 #include "core/voice_change.h"
74 #include "intrusive_ptr.h"
75 #include "opthelpers.h"
76 #include "ringbuffer.h"
77 #include "strutils.h"
78 #include "threads.h"
79 #include "vecmat.h"
80 #include "vector.h"
82 struct CTag;
83 #ifdef HAVE_SSE
84 struct SSETag;
85 #endif
86 #ifdef HAVE_SSE2
87 struct SSE2Tag;
88 #endif
89 #ifdef HAVE_SSE4_1
90 struct SSE4Tag;
91 #endif
92 #ifdef HAVE_NEON
93 struct NEONTag;
94 #endif
95 struct PointTag;
96 struct LerpTag;
97 struct CubicTag;
98 struct BSincTag;
99 struct FastBSincTag;
102 static_assert(!(MaxResamplerPadding&1), "MaxResamplerPadding is not a multiple of two");
105 namespace {
107 using uint = unsigned int;
109 constexpr uint MaxPitch{10};
111 static_assert((BufferLineSize-1)/MaxPitch > 0, "MaxPitch is too large for BufferLineSize!");
112 static_assert((INT_MAX>>MixerFracBits)/MaxPitch > BufferLineSize,
113 "MaxPitch and/or BufferLineSize are too large for MixerFracBits!");
115 using namespace std::placeholders;
117 float InitConeScale()
119 float ret{1.0f};
120 if(auto optval = al::getenv("__ALSOFT_HALF_ANGLE_CONES"))
122 if(al::strcasecmp(optval->c_str(), "true") == 0
123 || strtol(optval->c_str(), nullptr, 0) == 1)
124 ret *= 0.5f;
126 return ret;
128 /* Cone scalar */
129 const float ConeScale{InitConeScale()};
131 /* Localized scalars for mono sources (initialized in aluInit, after
132 * configuration is loaded).
134 float XScale{1.0f};
135 float YScale{1.0f};
136 float ZScale{1.0f};
138 } // namespace
140 namespace {
142 struct ChanMap {
143 Channel channel;
144 float angle;
145 float elevation;
148 using HrtfDirectMixerFunc = void(*)(const FloatBufferSpan LeftOut, const FloatBufferSpan RightOut,
149 const al::span<const FloatBufferLine> InSamples, float2 *AccumSamples, float *TempBuf,
150 HrtfChannelState *ChanState, const size_t IrSize, const size_t BufferSize);
152 HrtfDirectMixerFunc MixDirectHrtf{MixDirectHrtf_<CTag>};
154 inline HrtfDirectMixerFunc SelectHrtfMixer(void)
156 #ifdef HAVE_NEON
157 if((CPUCapFlags&CPU_CAP_NEON))
158 return MixDirectHrtf_<NEONTag>;
159 #endif
160 #ifdef HAVE_SSE
161 if((CPUCapFlags&CPU_CAP_SSE))
162 return MixDirectHrtf_<SSETag>;
163 #endif
165 return MixDirectHrtf_<CTag>;
169 inline void BsincPrepare(const uint increment, BsincState *state, const BSincTable *table)
171 size_t si{BSincScaleCount - 1};
172 float sf{0.0f};
174 if(increment > MixerFracOne)
176 sf = MixerFracOne/static_cast<float>(increment) - table->scaleBase;
177 sf = maxf(0.0f, BSincScaleCount*sf*table->scaleRange - 1.0f);
178 si = float2uint(sf);
179 /* The interpolation factor is fit to this diagonally-symmetric curve
180 * to reduce the transition ripple caused by interpolating different
181 * scales of the sinc function.
183 sf = 1.0f - std::cos(std::asin(sf - static_cast<float>(si)));
186 state->sf = sf;
187 state->m = table->m[si];
188 state->l = (state->m/2) - 1;
189 state->filter = table->Tab + table->filterOffset[si];
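/* As a worked example of the curve above: an interpolation fraction of 0.5
* maps to 1 - cos(asin(0.5)) = 1 - sqrt(0.75), roughly 0.134.
*/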
192 inline ResamplerFunc SelectResampler(Resampler resampler, uint increment)
194 switch(resampler)
196 case Resampler::Point:
197 return Resample_<PointTag,CTag>;
198 case Resampler::Linear:
199 #ifdef HAVE_NEON
200 if((CPUCapFlags&CPU_CAP_NEON))
201 return Resample_<LerpTag,NEONTag>;
202 #endif
203 #ifdef HAVE_SSE4_1
204 if((CPUCapFlags&CPU_CAP_SSE4_1))
205 return Resample_<LerpTag,SSE4Tag>;
206 #endif
207 #ifdef HAVE_SSE2
208 if((CPUCapFlags&CPU_CAP_SSE2))
209 return Resample_<LerpTag,SSE2Tag>;
210 #endif
211 return Resample_<LerpTag,CTag>;
212 case Resampler::Cubic:
213 return Resample_<CubicTag,CTag>;
214 case Resampler::BSinc12:
215 case Resampler::BSinc24:
216 if(increment <= MixerFracOne)
218 /* fall-through */
219 case Resampler::FastBSinc12:
220 case Resampler::FastBSinc24:
221 #ifdef HAVE_NEON
222 if((CPUCapFlags&CPU_CAP_NEON))
223 return Resample_<FastBSincTag,NEONTag>;
224 #endif
225 #ifdef HAVE_SSE
226 if((CPUCapFlags&CPU_CAP_SSE))
227 return Resample_<FastBSincTag,SSETag>;
228 #endif
229 return Resample_<FastBSincTag,CTag>;
231 #ifdef HAVE_NEON
232 if((CPUCapFlags&CPU_CAP_NEON))
233 return Resample_<BSincTag,NEONTag>;
234 #endif
235 #ifdef HAVE_SSE
236 if((CPUCapFlags&CPU_CAP_SSE))
237 return Resample_<BSincTag,SSETag>;
238 #endif
239 return Resample_<BSincTag,CTag>;
242 return Resample_<PointTag,CTag>;
245 } // namespace
247 void aluInit(CompatFlagBitset flags)
249 MixDirectHrtf = SelectHrtfMixer();
250 XScale = flags.test(CompatFlags::ReverseX) ? -1.0f : 1.0f;
251 YScale = flags.test(CompatFlags::ReverseY) ? -1.0f : 1.0f;
252 ZScale = flags.test(CompatFlags::ReverseZ) ? -1.0f : 1.0f;
256 ResamplerFunc PrepareResampler(Resampler resampler, uint increment, InterpState *state)
258 switch(resampler)
260 case Resampler::Point:
261 case Resampler::Linear:
262 case Resampler::Cubic:
263 break;
264 case Resampler::FastBSinc12:
265 case Resampler::BSinc12:
266 BsincPrepare(increment, &state->bsinc, &bsinc12);
267 break;
268 case Resampler::FastBSinc24:
269 case Resampler::BSinc24:
270 BsincPrepare(increment, &state->bsinc, &bsinc24);
271 break;
273 return SelectResampler(resampler, increment);
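/* A rough usage sketch (mirroring CalcNonAttnSourceParams and
* CalcAttnSourceParams below): the increment is the playback-to-output
* frequency ratio in MixerFracBits fixed-point, clamped to MaxPitch.
*
*     const float ratio{static_cast<float>(voice->mFrequency) /
*         static_cast<float>(device->Frequency) * props->Pitch};
*     voice->mStep = (ratio > float{MaxPitch}) ? MaxPitch<<MixerFracBits
*         : maxu(fastf2u(ratio * MixerFracOne), 1);
*     voice->mResampler = PrepareResampler(props->mResampler, voice->mStep,
*         &voice->mResampleState);
*/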
277 void DeviceBase::ProcessHrtf(const size_t SamplesToDo)
279 /* HRTF is stereo output only. */
280 const uint lidx{RealOut.ChannelIndex[FrontLeft]};
281 const uint ridx{RealOut.ChannelIndex[FrontRight]};
283 MixDirectHrtf(RealOut.Buffer[lidx], RealOut.Buffer[ridx], Dry.Buffer, HrtfAccumData,
284 mHrtfState->mTemp.data(), mHrtfState->mChannels.data(), mHrtfState->mIrSize, SamplesToDo);
287 void DeviceBase::ProcessAmbiDec(const size_t SamplesToDo)
289 AmbiDecoder->process(RealOut.Buffer, Dry.Buffer.data(), SamplesToDo);
292 void DeviceBase::ProcessAmbiDecStablized(const size_t SamplesToDo)
294 /* Decode with front image stabilization. */
295 const uint lidx{RealOut.ChannelIndex[FrontLeft]};
296 const uint ridx{RealOut.ChannelIndex[FrontRight]};
297 const uint cidx{RealOut.ChannelIndex[FrontCenter]};
299 AmbiDecoder->processStablize(RealOut.Buffer, Dry.Buffer.data(), lidx, ridx, cidx,
300 SamplesToDo);
303 void DeviceBase::ProcessUhj(const size_t SamplesToDo)
305 /* UHJ is stereo output only. */
306 const uint lidx{RealOut.ChannelIndex[FrontLeft]};
307 const uint ridx{RealOut.ChannelIndex[FrontRight]};
309 /* Encode to stereo-compatible 2-channel UHJ output. */
310 mUhjEncoder->encode(RealOut.Buffer[lidx].data(), RealOut.Buffer[ridx].data(),
311 Dry.Buffer.data(), SamplesToDo);
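/* For reference, the classic 2-channel UHJ encoding relations that the
* UhjEncoder (core/uhjfilter.h) realizes are approximately
*
*     S = 0.9396926*W + 0.1855740*X
*     D = j(-0.3420201*W + 0.5098604*X) + 0.6554516*Y
*     Left = (S + D)/2, Right = (S - D)/2
*
* where j is a +90 degree phase shift, so the result stays listenable as
* plain stereo while carrying the horizontal soundfield.
*/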
314 void DeviceBase::ProcessBs2b(const size_t SamplesToDo)
316 /* First, decode the ambisonic mix to the "real" output. */
317 AmbiDecoder->process(RealOut.Buffer, Dry.Buffer.data(), SamplesToDo);
319 /* BS2B is stereo output only. */
320 const uint lidx{RealOut.ChannelIndex[FrontLeft]};
321 const uint ridx{RealOut.ChannelIndex[FrontRight]};
323 /* Now apply the BS2B binaural/crossfeed filter. */
324 bs2b_cross_feed(Bs2b.get(), RealOut.Buffer[lidx].data(), RealOut.Buffer[ridx].data(),
325 SamplesToDo);
329 namespace {
331 /* This RNG method was created based on the math found in opusdec. It's quick,
332 * and starting with a seed value of 22222, is suitable for generating
333 * whitenoise.
335 inline uint dither_rng(uint *seed) noexcept
337 *seed = (*seed * 96314165) + 907633515;
338 return *seed;
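/* A minimal sketch of how the raw 32-bit output can be mapped to the
* uniform -1..+1 noise used for dithering below (hypothetical helper, shown
* for illustration only):
*
*     inline float dither_noise(uint *seed) noexcept
*     {
*         // Scale the 32-bit range down to [0,2), then shift to -1..+1.
*         return static_cast<float>(dither_rng(seed))*(1.0f/2147483648.0f) - 1.0f;
*     }
*/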
342 inline auto& GetAmbiScales(AmbiScaling scaletype) noexcept
344 switch(scaletype)
346 case AmbiScaling::FuMa: return AmbiScale::FromFuMa();
347 case AmbiScaling::SN3D: return AmbiScale::FromSN3D();
348 case AmbiScaling::UHJ: return AmbiScale::FromUHJ();
349 case AmbiScaling::N3D: break;
351 return AmbiScale::FromN3D();
354 inline auto& GetAmbiLayout(AmbiLayout layouttype) noexcept
356 if(layouttype == AmbiLayout::FuMa) return AmbiIndex::FromFuMa();
357 return AmbiIndex::FromACN();
360 inline auto& GetAmbi2DLayout(AmbiLayout layouttype) noexcept
362 if(layouttype == AmbiLayout::FuMa) return AmbiIndex::FromFuMa2D();
363 return AmbiIndex::FromACN2D();
367 bool CalcContextParams(ContextBase *ctx)
369 ContextProps *props{ctx->mParams.ContextUpdate.exchange(nullptr, std::memory_order_acq_rel)};
370 if(!props) return false;
372 const alu::Vector pos{props->Position[0], props->Position[1], props->Position[2], 1.0f};
373 ctx->mParams.Position = pos;
375 /* AT then UP */
376 alu::Vector N{props->OrientAt[0], props->OrientAt[1], props->OrientAt[2], 0.0f};
377 N.normalize();
378 alu::Vector V{props->OrientUp[0], props->OrientUp[1], props->OrientUp[2], 0.0f};
379 V.normalize();
380 /* Build and normalize right-vector */
381 alu::Vector U{N.cross_product(V)};
382 U.normalize();
384 const alu::Matrix rot{
385 U[0], V[0], -N[0], 0.0,
386 U[1], V[1], -N[1], 0.0,
387 U[2], V[2], -N[2], 0.0,
388 0.0, 0.0, 0.0, 1.0};
389 const alu::Vector vel{props->Velocity[0], props->Velocity[1], props->Velocity[2], 0.0};
391 ctx->mParams.Matrix = rot;
392 ctx->mParams.Velocity = rot * vel;
394 ctx->mParams.Gain = props->Gain * ctx->mGainBoost;
395 ctx->mParams.MetersPerUnit = props->MetersPerUnit;
396 ctx->mParams.AirAbsorptionGainHF = props->AirAbsorptionGainHF;
398 ctx->mParams.DopplerFactor = props->DopplerFactor;
399 ctx->mParams.SpeedOfSound = props->SpeedOfSound * props->DopplerVelocity;
401 ctx->mParams.SourceDistanceModel = props->SourceDistanceModel;
402 ctx->mParams.mDistanceModel = props->mDistanceModel;
404 AtomicReplaceHead(ctx->mFreeContextProps, props);
405 return true;
408 bool CalcEffectSlotParams(EffectSlot *slot, EffectSlot **sorted_slots, ContextBase *context)
410 EffectSlotProps *props{slot->Update.exchange(nullptr, std::memory_order_acq_rel)};
411 if(!props) return false;
413 /* If the effect slot target changed, clear the first sorted entry to force
414 * a re-sort.
416 if(slot->Target != props->Target)
417 *sorted_slots = nullptr;
418 slot->Gain = props->Gain;
419 slot->AuxSendAuto = props->AuxSendAuto;
420 slot->Target = props->Target;
421 slot->EffectType = props->Type;
422 slot->mEffectProps = props->Props;
423 if(props->Type == EffectSlotType::Reverb || props->Type == EffectSlotType::EAXReverb)
425 slot->RoomRolloff = props->Props.Reverb.RoomRolloffFactor;
426 slot->DecayTime = props->Props.Reverb.DecayTime;
427 slot->DecayLFRatio = props->Props.Reverb.DecayLFRatio;
428 slot->DecayHFRatio = props->Props.Reverb.DecayHFRatio;
429 slot->DecayHFLimit = props->Props.Reverb.DecayHFLimit;
430 slot->AirAbsorptionGainHF = props->Props.Reverb.AirAbsorptionGainHF;
432 else
434 slot->RoomRolloff = 0.0f;
435 slot->DecayTime = 0.0f;
436 slot->DecayLFRatio = 0.0f;
437 slot->DecayHFRatio = 0.0f;
438 slot->DecayHFLimit = false;
439 slot->AirAbsorptionGainHF = 1.0f;
442 EffectState *state{props->State.release()};
443 EffectState *oldstate{slot->mEffectState};
444 slot->mEffectState = state;
446 /* Only release the old state if it won't get deleted, since we can't be
447 * deleting/freeing anything in the mixer.
449 if(!oldstate->releaseIfNoDelete())
451 /* Otherwise, if it would be deleted send it off with a release event. */
452 RingBuffer *ring{context->mAsyncEvents.get()};
453 auto evt_vec = ring->getWriteVector();
454 if LIKELY(evt_vec.first.len > 0)
456 AsyncEvent *evt{al::construct_at(reinterpret_cast<AsyncEvent*>(evt_vec.first.buf),
457 AsyncEvent::ReleaseEffectState)};
458 evt->u.mEffectState = oldstate;
459 ring->writeAdvance(1);
461 else
463 /* If writing the event failed, the queue was probably full. Store
464 * the old state in the property object where it can eventually be
465 * cleaned up sometime later (not ideal, but better than blocking
466 * or leaking).
468 props->State.reset(oldstate);
472 AtomicReplaceHead(context->mFreeEffectslotProps, props);
474 EffectTarget output;
475 if(EffectSlot *target{slot->Target})
476 output = EffectTarget{&target->Wet, nullptr};
477 else
479 DeviceBase *device{context->mDevice};
480 output = EffectTarget{&device->Dry, &device->RealOut};
482 state->update(context, slot, &slot->mEffectProps, output);
483 return true;
487 /* Scales the given azimuth toward the side (+/- pi/2 radians) for positions in
488 * front.
490 inline float ScaleAzimuthFront(float azimuth, float scale)
492 const float abs_azi{std::fabs(azimuth)};
493 if(!(abs_azi >= al::numbers::pi_v<float>*0.5f))
494 return std::copysign(minf(abs_azi*scale, al::numbers::pi_v<float>*0.5f), azimuth);
495 return azimuth;
498 /* Wraps the given value in radians to stay between [-pi,+pi] */
499 inline float WrapRadians(float r)
501 static constexpr float Pi{al::numbers::pi_v<float>};
502 static constexpr float Pi2{Pi*2.0f};
503 if(r > Pi) return std::fmod(Pi+r, Pi2) - Pi;
504 if(r < -Pi) return Pi - std::fmod(Pi-r, Pi2);
505 return r;
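/* For example, WrapRadians(1.5f*Pi) yields -0.5f*Pi, and
* ScaleAzimuthFront(Deg2Rad(30.0f), 1.5f) yields Deg2Rad(45.0f), since 30
* degrees lies within the frontal +/-90 degree range.
*/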
508 /* Begin ambisonic rotation helpers.
510 * Rotating first-order B-Format just needs a straightforward X/Y/Z rotation
511 * matrix. Higher orders, however, are more complicated. The method implemented
512 * here is a recursive algorithm (the rotation for first-order is used to help
513 * generate the second-order rotation, which helps generate the third-order
514 * rotation, etc).
516 * Adapted from
517 * <https://github.com/polarch/Spherical-Harmonic-Transform/blob/master/getSHrotMtx.m>,
518 * provided under the BSD 3-Clause license.
520 * Copyright (c) 2015, Archontis Politis
521 * Copyright (c) 2019, Christopher Robinson
523 * The u, v, and w coefficients used for generating higher-order rotations are
524 * precomputed since they're constant. The second-order coefficients are
525 * followed by the third-order coefficients, etc.
527 struct RotatorCoeffs {
528 float u, v, w;
530 template<size_t N0, size_t N1>
531 static std::array<RotatorCoeffs,N0+N1> ConcatArrays(const std::array<RotatorCoeffs,N0> &lhs,
532 const std::array<RotatorCoeffs,N1> &rhs)
534 std::array<RotatorCoeffs,N0+N1> ret;
535 auto iter = std::copy(lhs.cbegin(), lhs.cend(), ret.begin());
536 std::copy(rhs.cbegin(), rhs.cend(), iter);
537 return ret;
540 template<int l, int num_elems=l*2+1>
541 static std::array<RotatorCoeffs,num_elems*num_elems> GenCoeffs()
543 std::array<RotatorCoeffs,num_elems*num_elems> ret{};
544 auto coeffs = ret.begin();
546 for(int m{-l};m <= l;++m)
548 for(int n{-l};n <= l;++n)
550 // compute u,v,w terms of Eq.8.1 (Table I)
551 const bool d{m == 0}; // the delta function d_m0
552 const float denom{static_cast<float>((std::abs(n) == l) ?
553 (2*l) * (2*l - 1) : (l*l - n*n))};
555 const int abs_m{std::abs(m)};
556 coeffs->u = std::sqrt(static_cast<float>(l*l - m*m)/denom);
557 coeffs->v = std::sqrt(static_cast<float>(l+abs_m-1) * static_cast<float>(l+abs_m) /
558 denom) * (1.0f+d) * (1.0f - 2.0f*d) * 0.5f;
559 coeffs->w = std::sqrt(static_cast<float>(l-abs_m-1) * static_cast<float>(l-abs_m) /
560 denom) * (1.0f-d) * -0.5f;
561 ++coeffs;
565 return ret;
568 const auto RotatorCoeffArray = RotatorCoeffs::ConcatArrays(RotatorCoeffs::GenCoeffs<2>(),
569 RotatorCoeffs::GenCoeffs<3>());
572 * Given the matrix, pre-filled with the (zeroth- and) first-order rotation
573 * coefficients, this fills in the coefficients for the higher orders up to and
574 * including the given order. The matrix is in ACN layout.
576 void AmbiRotator(std::array<std::array<float,MaxAmbiChannels>,MaxAmbiChannels> &matrix,
577 const int order)
579 /* Don't do anything for < 2nd order. */
580 if(order < 2) return;
582 auto P = [](const int i, const int l, const int a, const int n, const size_t last_band,
583 const std::array<std::array<float,MaxAmbiChannels>,MaxAmbiChannels> &R)
585 const float ri1{ R[static_cast<uint>(i+2)][ 1+2]};
586 const float rim1{R[static_cast<uint>(i+2)][-1+2]};
587 const float ri0{ R[static_cast<uint>(i+2)][ 0+2]};
589 auto vec = R[static_cast<uint>(a+l-1) + last_band].cbegin() + last_band;
590 if(n == -l)
591 return ri1*vec[0] + rim1*vec[static_cast<uint>(l-1)*size_t{2}];
592 if(n == l)
593 return ri1*vec[static_cast<uint>(l-1)*size_t{2}] - rim1*vec[0];
594 return ri0*vec[static_cast<uint>(n+l-1)];
597 auto U = [P](const int l, const int m, const int n, const size_t last_band,
598 const std::array<std::array<float,MaxAmbiChannels>,MaxAmbiChannels> &R)
600 return P(0, l, m, n, last_band, R);
602 auto V = [P](const int l, const int m, const int n, const size_t last_band,
603 const std::array<std::array<float,MaxAmbiChannels>,MaxAmbiChannels> &R)
605 using namespace al::numbers;
606 if(m > 0)
608 const bool d{m == 1};
609 const float p0{P( 1, l, m-1, n, last_band, R)};
610 const float p1{P(-1, l, -m+1, n, last_band, R)};
611 return d ? p0*sqrt2_v<float> : (p0 - p1);
613 const bool d{m == -1};
614 const float p0{P( 1, l, m+1, n, last_band, R)};
615 const float p1{P(-1, l, -m-1, n, last_band, R)};
616 return d ? p1*sqrt2_v<float> : (p0 + p1);
618 auto W = [P](const int l, const int m, const int n, const size_t last_band,
619 const std::array<std::array<float,MaxAmbiChannels>,MaxAmbiChannels> &R)
621 assert(m != 0);
622 if(m > 0)
624 const float p0{P( 1, l, m+1, n, last_band, R)};
625 const float p1{P(-1, l, -m-1, n, last_band, R)};
626 return p0 + p1;
628 const float p0{P( 1, l, m-1, n, last_band, R)};
629 const float p1{P(-1, l, -m+1, n, last_band, R)};
630 return p0 - p1;
633 // compute rotation matrix of each subsequent band recursively
634 auto coeffs = RotatorCoeffArray.cbegin();
635 size_t band_idx{4}, last_band{1};
636 for(int l{2};l <= order;++l)
638 size_t y{band_idx};
639 for(int m{-l};m <= l;++m,++y)
641 size_t x{band_idx};
642 for(int n{-l};n <= l;++n,++x)
644 float r{0.0f};
646 // computes Eq.8.1
647 const float u{coeffs->u};
648 if(u != 0.0f) r += u * U(l, m, n, last_band, matrix);
649 const float v{coeffs->v};
650 if(v != 0.0f) r += v * V(l, m, n, last_band, matrix);
651 const float w{coeffs->w};
652 if(w != 0.0f) r += w * W(l, m, n, last_band, matrix);
654 matrix[y][x] = r;
655 ++coeffs;
658 last_band = band_idx;
659 band_idx += static_cast<uint>(l)*size_t{2} + 1;
662 /* End ambisonic rotation helpers. */
665 constexpr float Deg2Rad(float x) noexcept
666 { return static_cast<float>(al::numbers::pi / 180.0 * x); }
668 struct GainTriplet { float Base, HF, LF; };
670 void CalcPanningAndFilters(Voice *voice, const float xpos, const float ypos, const float zpos,
671 const float Distance, const float Spread, const GainTriplet &DryGain,
672 const al::span<const GainTriplet,MAX_SENDS> WetGain, EffectSlot *(&SendSlots)[MAX_SENDS],
673 const VoiceProps *props, const ContextParams &Context, const DeviceBase *Device)
675 static constexpr ChanMap MonoMap[1]{
676 { FrontCenter, 0.0f, 0.0f }
677 }, RearMap[2]{
678 { BackLeft, Deg2Rad(-150.0f), Deg2Rad(0.0f) },
679 { BackRight, Deg2Rad( 150.0f), Deg2Rad(0.0f) }
680 }, QuadMap[4]{
681 { FrontLeft, Deg2Rad( -45.0f), Deg2Rad(0.0f) },
682 { FrontRight, Deg2Rad( 45.0f), Deg2Rad(0.0f) },
683 { BackLeft, Deg2Rad(-135.0f), Deg2Rad(0.0f) },
684 { BackRight, Deg2Rad( 135.0f), Deg2Rad(0.0f) }
685 }, X51Map[6]{
686 { FrontLeft, Deg2Rad( -30.0f), Deg2Rad(0.0f) },
687 { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
688 { FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
689 { LFE, 0.0f, 0.0f },
690 { SideLeft, Deg2Rad(-110.0f), Deg2Rad(0.0f) },
691 { SideRight, Deg2Rad( 110.0f), Deg2Rad(0.0f) }
692 }, X61Map[7]{
693 { FrontLeft, Deg2Rad(-30.0f), Deg2Rad(0.0f) },
694 { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
695 { FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
696 { LFE, 0.0f, 0.0f },
697 { BackCenter, Deg2Rad(180.0f), Deg2Rad(0.0f) },
698 { SideLeft, Deg2Rad(-90.0f), Deg2Rad(0.0f) },
699 { SideRight, Deg2Rad( 90.0f), Deg2Rad(0.0f) }
700 }, X71Map[8]{
701 { FrontLeft, Deg2Rad( -30.0f), Deg2Rad(0.0f) },
702 { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
703 { FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
704 { LFE, 0.0f, 0.0f },
705 { BackLeft, Deg2Rad(-150.0f), Deg2Rad(0.0f) },
706 { BackRight, Deg2Rad( 150.0f), Deg2Rad(0.0f) },
707 { SideLeft, Deg2Rad( -90.0f), Deg2Rad(0.0f) },
708 { SideRight, Deg2Rad( 90.0f), Deg2Rad(0.0f) }
711 ChanMap StereoMap[2]{
712 { FrontLeft, Deg2Rad(-30.0f), Deg2Rad(0.0f) },
713 { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) }
716 const auto Frequency = static_cast<float>(Device->Frequency);
717 const uint NumSends{Device->NumAuxSends};
719 const size_t num_channels{voice->mChans.size()};
720 ASSUME(num_channels > 0);
722 for(auto &chandata : voice->mChans)
724 chandata.mDryParams.Hrtf.Target = HrtfFilter{};
725 chandata.mDryParams.Gains.Target.fill(0.0f);
726 std::for_each(chandata.mWetParams.begin(), chandata.mWetParams.begin()+NumSends,
727 [](SendParams &params) -> void { params.Gains.Target.fill(0.0f); });
730 DirectMode DirectChannels{props->DirectChannels};
731 const ChanMap *chans{nullptr};
732 switch(voice->mFmtChannels)
734 case FmtMono:
735 chans = MonoMap;
736 /* Mono buffers are never played direct. */
737 DirectChannels = DirectMode::Off;
738 break;
740 case FmtStereo:
741 if(DirectChannels == DirectMode::Off)
743 /* Convert counter-clockwise to clockwise, and wrap between
744 * [-pi,+pi].
746 StereoMap[0].angle = WrapRadians(-props->StereoPan[0]);
747 StereoMap[1].angle = WrapRadians(-props->StereoPan[1]);
749 chans = StereoMap;
750 break;
752 case FmtRear: chans = RearMap; break;
753 case FmtQuad: chans = QuadMap; break;
754 case FmtX51: chans = X51Map; break;
755 case FmtX61: chans = X61Map; break;
756 case FmtX71: chans = X71Map; break;
758 case FmtBFormat2D:
759 case FmtBFormat3D:
760 case FmtUHJ2:
761 case FmtUHJ3:
762 case FmtUHJ4:
763 case FmtSuperStereo:
764 DirectChannels = DirectMode::Off;
765 break;
768 voice->mFlags.reset(VoiceHasHrtf).reset(VoiceHasNfc);
769 if(auto *decoder{voice->mDecoder.get()})
770 decoder->mWidthControl = minf(props->EnhWidth, 0.7f);
772 if(IsAmbisonic(voice->mFmtChannels))
774 /* Special handling for B-Format and UHJ sources. */
776 if(Device->AvgSpeakerDist > 0.0f && voice->mFmtChannels != FmtUHJ2
777 && voice->mFmtChannels != FmtSuperStereo)
779 if(!(Distance > std::numeric_limits<float>::epsilon()))
781 /* NOTE: The NFCtrlFilters were created with a w0 of 0, which
782 * is what we want for FOA input. The first channel may have
783 * been previously re-adjusted if panned, so reset it.
785 voice->mChans[0].mDryParams.NFCtrlFilter.adjust(0.0f);
787 else
789 /* Clamp the distance for really close sources, to prevent
790 * excessive bass.
792 const float mdist{maxf(Distance, Device->AvgSpeakerDist/4.0f)};
793 const float w0{SpeedOfSoundMetersPerSec / (mdist * Frequency)};
795 /* Only need to adjust the first channel of a B-Format source. */
796 voice->mChans[0].mDryParams.NFCtrlFilter.adjust(w0);
799 voice->mFlags.set(VoiceHasNfc);
802 /* Panning a B-Format sound toward some direction is easy. Just pan the
803 * first (W) channel as a normal mono sound. The angular spread is used
804 * as a directional scalar to blend between full coverage and full
805 * panning.
807 const float coverage{!(Distance > std::numeric_limits<float>::epsilon()) ? 1.0f :
808 (al::numbers::inv_pi_v<float>/2.0f * Spread)};
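/* coverage is Spread/(2*pi): a fully omnidirectional spread of 2*pi gives
* full coverage (1), while a point source with no spread gives pure
* panning (0).
*/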
810 auto calc_coeffs = [xpos,ypos,zpos](RenderMode mode)
812 if(mode != RenderMode::Pairwise)
813 return CalcDirectionCoeffs({xpos, ypos, zpos}, 0.0f);
815 /* Clamp Y, in case rounding errors caused it to end up outside
816 * of -1...+1.
818 const float ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
819 /* Negate Z for right-handed coords with -Z in front. */
820 const float az{std::atan2(xpos, -zpos)};
822 /* A scalar of 1.5 for plain stereo results in +/-60 degrees
823 * being moved to +/-90 degrees for direct right and left
824 * speaker responses.
826 return CalcAngleCoeffs(ScaleAzimuthFront(az, 1.5f), ev, 0.0f);
828 auto coeffs = calc_coeffs(Device->mRenderMode);
829 std::transform(coeffs.begin()+1, coeffs.end(), coeffs.begin()+1,
830 std::bind(std::multiplies<float>{}, _1, 1.0f-coverage));
832 /* NOTE: W needs to be scaled according to channel scaling. */
833 auto&& scales = GetAmbiScales(voice->mAmbiScaling);
834 ComputePanGains(&Device->Dry, coeffs.data(), DryGain.Base*scales[0],
835 voice->mChans[0].mDryParams.Gains.Target);
836 for(uint i{0};i < NumSends;i++)
838 if(const EffectSlot *Slot{SendSlots[i]})
839 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base*scales[0],
840 voice->mChans[0].mWetParams[i].Gains.Target);
843 if(coverage > 0.0f)
845 /* Local B-Format sources have their XYZ channels rotated according
846 * to the orientation.
848 /* AT then UP */
849 alu::Vector N{props->OrientAt[0], props->OrientAt[1], props->OrientAt[2], 0.0f};
850 N.normalize();
851 alu::Vector V{props->OrientUp[0], props->OrientUp[1], props->OrientUp[2], 0.0f};
852 V.normalize();
853 if(!props->HeadRelative)
855 N = Context.Matrix * N;
856 V = Context.Matrix * V;
858 /* Build and normalize right-vector */
859 alu::Vector U{N.cross_product(V)};
860 U.normalize();
862 /* Build a rotation matrix. Manually fill the zeroth- and first-
863 * order elements, then construct the rotation for the higher
864 * orders.
866 std::array<std::array<float,MaxAmbiChannels>,MaxAmbiChannels> shrot{};
867 shrot[0][0] = 1.0f;
868 shrot[1][1] = U[0]; shrot[1][2] = -V[0]; shrot[1][3] = -N[0];
869 shrot[2][1] = -U[1]; shrot[2][2] = V[1]; shrot[2][3] = N[1];
870 shrot[3][1] = U[2]; shrot[3][2] = -V[2]; shrot[3][3] = -N[2];
871 AmbiRotator(shrot, static_cast<int>(minu(voice->mAmbiOrder, Device->mAmbiOrder)));
873 /* Convert the rotation matrix for input ordering and scaling, and
874 * whether input is 2D or 3D.
876 const uint8_t *index_map{Is2DAmbisonic(voice->mFmtChannels) ?
877 GetAmbi2DLayout(voice->mAmbiLayout).data() :
878 GetAmbiLayout(voice->mAmbiLayout).data()};
880 static const uint8_t ChansPerOrder[MaxAmbiOrder+1]{1, 3, 5, 7,};
881 static const uint8_t OrderOffset[MaxAmbiOrder+1]{0, 1, 4, 9,};
882 for(size_t c{1};c < num_channels;c++)
884 const size_t acn{index_map[c]};
885 const size_t order{AmbiIndex::OrderFromChannel()[acn]};
886 const size_t tocopy{ChansPerOrder[order]};
887 const size_t offset{OrderOffset[order]};
888 const float scale{scales[acn] * coverage};
889 auto in = shrot.cbegin() + offset;
891 coeffs = std::array<float,MaxAmbiChannels>{};
892 for(size_t x{0};x < tocopy;++x)
893 coeffs[offset+x] = in[x][acn] * scale;
895 ComputePanGains(&Device->Dry, coeffs.data(), DryGain.Base,
896 voice->mChans[c].mDryParams.Gains.Target);
898 for(uint i{0};i < NumSends;i++)
900 if(const EffectSlot *Slot{SendSlots[i]})
901 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
902 voice->mChans[c].mWetParams[i].Gains.Target);
907 else if(DirectChannels != DirectMode::Off && !Device->RealOut.RemixMap.empty())
909 /* Direct source channels always play local. Skip the virtual channels
910 * and write inputs to the matching real outputs.
912 voice->mDirect.Buffer = Device->RealOut.Buffer;
914 for(size_t c{0};c < num_channels;c++)
916 uint idx{GetChannelIdxByName(Device->RealOut, chans[c].channel)};
917 if(idx != INVALID_CHANNEL_INDEX)
918 voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain.Base;
919 else if(DirectChannels == DirectMode::RemixMismatch)
921 auto match_channel = [chans,c](const InputRemixMap &map) noexcept -> bool
922 { return chans[c].channel == map.channel; };
923 auto remap = std::find_if(Device->RealOut.RemixMap.cbegin(),
924 Device->RealOut.RemixMap.cend(), match_channel);
925 if(remap != Device->RealOut.RemixMap.cend())
927 for(const auto &target : remap->targets)
929 idx = GetChannelIdxByName(Device->RealOut, target.channel);
930 if(idx != INVALID_CHANNEL_INDEX)
931 voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain.Base *
932 target.mix;
938 /* Auxiliary sends still use normal channel panning since they mix to
939 * B-Format, which can't channel-match.
941 for(size_t c{0};c < num_channels;c++)
943 const auto coeffs = CalcAngleCoeffs(chans[c].angle, chans[c].elevation, 0.0f);
945 for(uint i{0};i < NumSends;i++)
947 if(const EffectSlot *Slot{SendSlots[i]})
948 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
949 voice->mChans[c].mWetParams[i].Gains.Target);
953 else if(Device->mRenderMode == RenderMode::Hrtf)
955 /* Full HRTF rendering. Skip the virtual channels and render to the
956 * real outputs.
958 voice->mDirect.Buffer = Device->RealOut.Buffer;
960 if(Distance > std::numeric_limits<float>::epsilon())
962 const float ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
963 const float az{std::atan2(xpos, -zpos)};
965 /* Get the HRIR coefficients and delays just once, for the given
966 * source direction.
968 GetHrtfCoeffs(Device->mHrtf.get(), ev, az, Distance, Spread,
969 voice->mChans[0].mDryParams.Hrtf.Target.Coeffs,
970 voice->mChans[0].mDryParams.Hrtf.Target.Delay);
971 voice->mChans[0].mDryParams.Hrtf.Target.Gain = DryGain.Base;
973 /* Remaining channels use the same results as the first. */
974 for(size_t c{1};c < num_channels;c++)
976 /* Skip LFE */
977 if(chans[c].channel == LFE) continue;
978 voice->mChans[c].mDryParams.Hrtf.Target = voice->mChans[0].mDryParams.Hrtf.Target;
981 /* Calculate the directional coefficients once, which apply to all
982 * input channels of the source sends.
984 const auto coeffs = CalcDirectionCoeffs({xpos, ypos, zpos}, Spread);
986 for(size_t c{0};c < num_channels;c++)
988 /* Skip LFE */
989 if(chans[c].channel == LFE)
990 continue;
991 for(uint i{0};i < NumSends;i++)
993 if(const EffectSlot *Slot{SendSlots[i]})
994 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
995 voice->mChans[c].mWetParams[i].Gains.Target);
999 else
1001 /* Local sources on HRTF play with each channel panned to its
1002 * relative location around the listener, providing "virtual
1003 * speaker" responses.
1005 for(size_t c{0};c < num_channels;c++)
1007 /* Skip LFE */
1008 if(chans[c].channel == LFE)
1009 continue;
1011 /* Get the HRIR coefficients and delays for this channel
1012 * position.
1014 GetHrtfCoeffs(Device->mHrtf.get(), chans[c].elevation, chans[c].angle,
1015 std::numeric_limits<float>::infinity(), Spread,
1016 voice->mChans[c].mDryParams.Hrtf.Target.Coeffs,
1017 voice->mChans[c].mDryParams.Hrtf.Target.Delay);
1018 voice->mChans[c].mDryParams.Hrtf.Target.Gain = DryGain.Base;
1020 /* Normal panning for auxiliary sends. */
1021 const auto coeffs = CalcAngleCoeffs(chans[c].angle, chans[c].elevation, Spread);
1023 for(uint i{0};i < NumSends;i++)
1025 if(const EffectSlot *Slot{SendSlots[i]})
1026 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1027 voice->mChans[c].mWetParams[i].Gains.Target);
1032 voice->mFlags.set(VoiceHasHrtf);
1034 else
1036 /* Non-HRTF rendering. Use normal panning to the output. */
1038 if(Distance > std::numeric_limits<float>::epsilon())
1040 /* Calculate NFC filter coefficient if needed. */
1041 if(Device->AvgSpeakerDist > 0.0f)
1043 /* Clamp the distance for really close sources, to prevent
1044 * excessive bass.
1046 const float mdist{maxf(Distance, Device->AvgSpeakerDist/4.0f)};
1047 const float w0{SpeedOfSoundMetersPerSec / (mdist * Frequency)};
1049 /* Adjust NFC filters. */
1050 for(size_t c{0};c < num_channels;c++)
1051 voice->mChans[c].mDryParams.NFCtrlFilter.adjust(w0);
1053 voice->mFlags.set(VoiceHasNfc);
1056 /* Calculate the directional coefficients once, which apply to all
1057 * input channels.
1059 auto calc_coeffs = [xpos,ypos,zpos,Spread](RenderMode mode)
1061 if(mode != RenderMode::Pairwise)
1062 return CalcDirectionCoeffs({xpos, ypos, zpos}, Spread);
1063 const float ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
1064 const float az{std::atan2(xpos, -zpos)};
1065 return CalcAngleCoeffs(ScaleAzimuthFront(az, 1.5f), ev, Spread);
1067 const auto coeffs = calc_coeffs(Device->mRenderMode);
1069 for(size_t c{0};c < num_channels;c++)
1071 /* Special-case LFE */
1072 if(chans[c].channel == LFE)
1074 if(Device->Dry.Buffer.data() == Device->RealOut.Buffer.data())
1076 const uint idx{GetChannelIdxByName(Device->RealOut, chans[c].channel)};
1077 if(idx != INVALID_CHANNEL_INDEX)
1078 voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain.Base;
1080 continue;
1083 ComputePanGains(&Device->Dry, coeffs.data(), DryGain.Base,
1084 voice->mChans[c].mDryParams.Gains.Target);
1085 for(uint i{0};i < NumSends;i++)
1087 if(const EffectSlot *Slot{SendSlots[i]})
1088 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1089 voice->mChans[c].mWetParams[i].Gains.Target);
1093 else
1095 if(Device->AvgSpeakerDist > 0.0f)
1097 /* If the source distance is 0, simulate a plane-wave by using
1098 * infinite distance, which results in a w0 of 0.
1100 static constexpr float w0{0.0f};
1101 for(size_t c{0};c < num_channels;c++)
1102 voice->mChans[c].mDryParams.NFCtrlFilter.adjust(w0);
1104 voice->mFlags.set(VoiceHasNfc);
1107 for(size_t c{0};c < num_channels;c++)
1109 /* Special-case LFE */
1110 if(chans[c].channel == LFE)
1112 if(Device->Dry.Buffer.data() == Device->RealOut.Buffer.data())
1114 const uint idx{GetChannelIdxByName(Device->RealOut, chans[c].channel)};
1115 if(idx != INVALID_CHANNEL_INDEX)
1116 voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain.Base;
1118 continue;
1121 const auto coeffs = CalcAngleCoeffs((Device->mRenderMode == RenderMode::Pairwise)
1122 ? ScaleAzimuthFront(chans[c].angle, 3.0f) : chans[c].angle,
1123 chans[c].elevation, Spread);
1125 ComputePanGains(&Device->Dry, coeffs.data(), DryGain.Base,
1126 voice->mChans[c].mDryParams.Gains.Target);
1127 for(uint i{0};i < NumSends;i++)
1129 if(const EffectSlot *Slot{SendSlots[i]})
1130 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1131 voice->mChans[c].mWetParams[i].Gains.Target);
1138 const float hfNorm{props->Direct.HFReference / Frequency};
1139 const float lfNorm{props->Direct.LFReference / Frequency};
1141 voice->mDirect.FilterType = AF_None;
1142 if(DryGain.HF != 1.0f) voice->mDirect.FilterType |= AF_LowPass;
1143 if(DryGain.LF != 1.0f) voice->mDirect.FilterType |= AF_HighPass;
1145 auto &lowpass = voice->mChans[0].mDryParams.LowPass;
1146 auto &highpass = voice->mChans[0].mDryParams.HighPass;
1147 lowpass.setParamsFromSlope(BiquadType::HighShelf, hfNorm, DryGain.HF, 1.0f);
1148 highpass.setParamsFromSlope(BiquadType::LowShelf, lfNorm, DryGain.LF, 1.0f);
1149 for(size_t c{1};c < num_channels;c++)
1151 voice->mChans[c].mDryParams.LowPass.copyParamsFrom(lowpass);
1152 voice->mChans[c].mDryParams.HighPass.copyParamsFrom(highpass);
1155 for(uint i{0};i < NumSends;i++)
1157 const float hfNorm{props->Send[i].HFReference / Frequency};
1158 const float lfNorm{props->Send[i].LFReference / Frequency};
1160 voice->mSend[i].FilterType = AF_None;
1161 if(WetGain[i].HF != 1.0f) voice->mSend[i].FilterType |= AF_LowPass;
1162 if(WetGain[i].LF != 1.0f) voice->mSend[i].FilterType |= AF_HighPass;
1164 auto &lowpass = voice->mChans[0].mWetParams[i].LowPass;
1165 auto &highpass = voice->mChans[0].mWetParams[i].HighPass;
1166 lowpass.setParamsFromSlope(BiquadType::HighShelf, hfNorm, WetGain[i].HF, 1.0f);
1167 highpass.setParamsFromSlope(BiquadType::LowShelf, lfNorm, WetGain[i].LF, 1.0f);
1168 for(size_t c{1};c < num_channels;c++)
1170 voice->mChans[c].mWetParams[i].LowPass.copyParamsFrom(lowpass);
1171 voice->mChans[c].mWetParams[i].HighPass.copyParamsFrom(highpass);
1176 void CalcNonAttnSourceParams(Voice *voice, const VoiceProps *props, const ContextBase *context)
1178 const DeviceBase *Device{context->mDevice};
1179 EffectSlot *SendSlots[MAX_SENDS];
1181 voice->mDirect.Buffer = Device->Dry.Buffer;
1182 for(uint i{0};i < Device->NumAuxSends;i++)
1184 SendSlots[i] = props->Send[i].Slot;
1185 if(!SendSlots[i] || SendSlots[i]->EffectType == EffectSlotType::None)
1187 SendSlots[i] = nullptr;
1188 voice->mSend[i].Buffer = {};
1190 else
1191 voice->mSend[i].Buffer = SendSlots[i]->Wet.Buffer;
1194 /* Calculate the stepping value */
1195 const auto Pitch = static_cast<float>(voice->mFrequency) /
1196 static_cast<float>(Device->Frequency) * props->Pitch;
1197 if(Pitch > float{MaxPitch})
1198 voice->mStep = MaxPitch<<MixerFracBits;
1199 else
1200 voice->mStep = maxu(fastf2u(Pitch * MixerFracOne), 1);
1201 voice->mResampler = PrepareResampler(props->mResampler, voice->mStep, &voice->mResampleState);
1203 /* Calculate gains */
1204 GainTriplet DryGain;
1205 DryGain.Base = minf(clampf(props->Gain, props->MinGain, props->MaxGain) * props->Direct.Gain *
1206 context->mParams.Gain, GainMixMax);
1207 DryGain.HF = props->Direct.GainHF;
1208 DryGain.LF = props->Direct.GainLF;
1209 GainTriplet WetGain[MAX_SENDS];
1210 for(uint i{0};i < Device->NumAuxSends;i++)
1212 WetGain[i].Base = minf(clampf(props->Gain, props->MinGain, props->MaxGain) *
1213 props->Send[i].Gain * context->mParams.Gain, GainMixMax);
1214 WetGain[i].HF = props->Send[i].GainHF;
1215 WetGain[i].LF = props->Send[i].GainLF;
1218 CalcPanningAndFilters(voice, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, DryGain, WetGain, SendSlots, props,
1219 context->mParams, Device);
1222 void CalcAttnSourceParams(Voice *voice, const VoiceProps *props, const ContextBase *context)
1224 const DeviceBase *Device{context->mDevice};
1225 const uint NumSends{Device->NumAuxSends};
1227 /* Set mixing buffers and get send parameters. */
1228 voice->mDirect.Buffer = Device->Dry.Buffer;
1229 EffectSlot *SendSlots[MAX_SENDS];
1230 uint UseDryAttnForRoom{0};
1231 for(uint i{0};i < NumSends;i++)
1233 SendSlots[i] = props->Send[i].Slot;
1234 if(!SendSlots[i] || SendSlots[i]->EffectType == EffectSlotType::None)
1235 SendSlots[i] = nullptr;
1236 else if(!SendSlots[i]->AuxSendAuto)
1238 /* If the slot's auxiliary send auto is off, the data sent to the
1239 * effect slot is the same as the dry path, sans filter effects.
1241 UseDryAttnForRoom |= 1u<<i;
1244 if(!SendSlots[i])
1245 voice->mSend[i].Buffer = {};
1246 else
1247 voice->mSend[i].Buffer = SendSlots[i]->Wet.Buffer;
1250 /* Transform source to listener space (convert to head relative) */
1251 alu::Vector Position{props->Position[0], props->Position[1], props->Position[2], 1.0f};
1252 alu::Vector Velocity{props->Velocity[0], props->Velocity[1], props->Velocity[2], 0.0f};
1253 alu::Vector Direction{props->Direction[0], props->Direction[1], props->Direction[2], 0.0f};
1254 if(!props->HeadRelative)
1256 /* Transform source vectors */
1257 Position = context->mParams.Matrix * (Position - context->mParams.Position);
1258 Velocity = context->mParams.Matrix * Velocity;
1259 Direction = context->mParams.Matrix * Direction;
1261 else
1263 /* Offset the source velocity to be relative of the listener velocity */
1264 Velocity += context->mParams.Velocity;
1267 const bool directional{Direction.normalize() > 0.0f};
1268 alu::Vector ToSource{Position[0], Position[1], Position[2], 0.0f};
1269 const float Distance{ToSource.normalize()};
1271 /* Calculate distance attenuation */
1272 float ClampedDist{Distance};
1273 float DryGainBase{props->Gain};
1274 float WetGainBase{props->Gain};
1276 switch(context->mParams.SourceDistanceModel ? props->mDistanceModel
1277 : context->mParams.mDistanceModel)
1279 case DistanceModel::InverseClamped:
1280 if(props->MaxDistance < props->RefDistance) break;
1281 ClampedDist = clampf(ClampedDist, props->RefDistance, props->MaxDistance);
1282 /*fall-through*/
1283 case DistanceModel::Inverse:
1284 if(props->RefDistance > 0.0f)
1286 float dist{lerpf(props->RefDistance, ClampedDist, props->RolloffFactor)};
1287 if(dist > 0.0f) DryGainBase *= props->RefDistance / dist;
1289 dist = lerpf(props->RefDistance, ClampedDist, props->RoomRolloffFactor);
1290 if(dist > 0.0f) WetGainBase *= props->RefDistance / dist;
1292 break;
1294 case DistanceModel::LinearClamped:
1295 if(props->MaxDistance < props->RefDistance) break;
1296 ClampedDist = clampf(ClampedDist, props->RefDistance, props->MaxDistance);
1297 /*fall-through*/
1298 case DistanceModel::Linear:
1299 if(props->MaxDistance != props->RefDistance)
1301 float attn{(ClampedDist-props->RefDistance) /
1302 (props->MaxDistance-props->RefDistance) * props->RolloffFactor};
1303 DryGainBase *= maxf(1.0f - attn, 0.0f);
1305 attn = (ClampedDist-props->RefDistance) /
1306 (props->MaxDistance-props->RefDistance) * props->RoomRolloffFactor;
1307 WetGainBase *= maxf(1.0f - attn, 0.0f);
1309 break;
1311 case DistanceModel::ExponentClamped:
1312 if(props->MaxDistance < props->RefDistance) break;
1313 ClampedDist = clampf(ClampedDist, props->RefDistance, props->MaxDistance);
1314 /*fall-through*/
1315 case DistanceModel::Exponent:
1316 if(ClampedDist > 0.0f && props->RefDistance > 0.0f)
1318 const float dist_ratio{ClampedDist/props->RefDistance};
1319 DryGainBase *= std::pow(dist_ratio, -props->RolloffFactor);
1320 WetGainBase *= std::pow(dist_ratio, -props->RoomRolloffFactor);
1322 break;
1324 case DistanceModel::Disable:
1325 break;
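/* In closed form, the attenuation above follows the usual OpenAL distance
* models (with dist clamped to [ref,max] for the *Clamped variants):
*
*     Inverse:  gain = ref / (ref + rolloff*(dist - ref))
*     Linear:   gain = max(1 - rolloff*(dist - ref)/(max - ref), 0)
*     Exponent: gain = (dist / ref)^(-rolloff)
*
* For example, the inverse model with ref=1 and rolloff=1 gives a gain of
* 0.2 (about -14dB) at a distance of 5.
*/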
1328 /* Calculate directional soundcones */
1329 float ConeHF{1.0f}, WetConeHF{1.0f};
1330 if(directional && props->InnerAngle < 360.0f)
1332 static constexpr float Rad2Deg{static_cast<float>(180.0 / al::numbers::pi)};
1333 const float Angle{Rad2Deg*2.0f * std::acos(-Direction.dot_product(ToSource)) * ConeScale};
1335 float ConeGain{1.0f};
1336 if(Angle >= props->OuterAngle)
1338 ConeGain = props->OuterGain;
1339 ConeHF = lerpf(1.0f, props->OuterGainHF, props->DryGainHFAuto);
1341 else if(Angle >= props->InnerAngle)
1343 const float scale{(Angle-props->InnerAngle) / (props->OuterAngle-props->InnerAngle)};
1344 ConeGain = lerpf(1.0f, props->OuterGain, scale);
1345 ConeHF = lerpf(1.0f, props->OuterGainHF, scale * props->DryGainHFAuto);
1348 DryGainBase *= ConeGain;
1349 WetGainBase *= lerpf(1.0f, ConeGain, props->WetGainAuto);
1351 WetConeHF = lerpf(1.0f, ConeHF, props->WetGainHFAuto);
1354 /* Apply gain and frequency filters */
1355 DryGainBase = clampf(DryGainBase, props->MinGain, props->MaxGain) * context->mParams.Gain;
1356 WetGainBase = clampf(WetGainBase, props->MinGain, props->MaxGain) * context->mParams.Gain;
1358 GainTriplet DryGain{};
1359 DryGain.Base = minf(DryGainBase * props->Direct.Gain, GainMixMax);
1360 DryGain.HF = ConeHF * props->Direct.GainHF;
1361 DryGain.LF = props->Direct.GainLF;
1362 GainTriplet WetGain[MAX_SENDS]{};
1363 for(uint i{0};i < NumSends;i++)
1365 /* If this effect slot's Auxiliary Send Auto is off, then use the dry
1366 * path distance and cone attenuation, otherwise use the wet (room)
1367 * path distance and cone attenuation. The send filter is used instead
1368 * of the direct filter, regardless.
1370 const bool use_room{!(UseDryAttnForRoom&(1u<<i))};
1371 const float gain{use_room ? WetGainBase : DryGainBase};
1372 WetGain[i].Base = minf(gain * props->Send[i].Gain, GainMixMax);
1373 WetGain[i].HF = (use_room ? WetConeHF : ConeHF) * props->Send[i].GainHF;
1374 WetGain[i].LF = props->Send[i].GainLF;
1377 /* Distance-based air absorption and initial send decay. */
1378 if(likely(Distance > props->RefDistance))
1380 const float distance_base{(Distance-props->RefDistance) * props->RolloffFactor};
1381 const float absorption{distance_base * context->mParams.MetersPerUnit *
1382 props->AirAbsorptionFactor};
1383 if(absorption > std::numeric_limits<float>::epsilon())
1385 const float hfattn{std::pow(context->mParams.AirAbsorptionGainHF, absorption)};
1386 DryGain.HF *= hfattn;
1387 for(uint i{0u};i < NumSends;++i)
1388 WetGain[i].HF *= hfattn;
1391 /* If the source's Auxiliary Send Filter Gain Auto is off, no extra
1392 * adjustment is applied to the send gains.
1394 for(uint i{props->WetGainAuto ? 0u : NumSends};i < NumSends;++i)
1396 if(!SendSlots[i])
1397 continue;
1399 auto calc_attenuation = [](float distance, float refdist, float rolloff) noexcept
1401 const float dist{lerpf(refdist, distance, rolloff)};
1402 if(dist > refdist) return refdist / dist;
1403 return 1.0f;
1406 /* The reverb effect's room rolloff factor always applies to an
1407 * inverse distance rolloff model.
1409 WetGain[i].Base *= calc_attenuation(Distance, props->RefDistance,
1410 SendSlots[i]->RoomRolloff);
1412 /* If this effect slot's Auxiliary Send Auto is off, don't apply
1413 * the automatic initial reverb decay (should the reverb's room
1414 * rolloff still apply?).
1416 if(!SendSlots[i]->AuxSendAuto)
1417 continue;
1419 GainTriplet DecayDistance;
1420 /* Calculate the distances to where this effect's decay reaches
1421 * -60dB.
1423 DecayDistance.Base = SendSlots[i]->DecayTime * SpeedOfSoundMetersPerSec;
1424 DecayDistance.LF = DecayDistance.Base * SendSlots[i]->DecayLFRatio;
1425 DecayDistance.HF = DecayDistance.Base * SendSlots[i]->DecayHFRatio;
1426 if(SendSlots[i]->DecayHFLimit)
1428 const float airAbsorption{SendSlots[i]->AirAbsorptionGainHF};
1429 if(airAbsorption < 1.0f)
1431 /* Calculate the distance to where this effect's air
1432 * absorption reaches -60dB, and limit the effect's HF
1433 * decay distance (so it doesn't take any longer to decay
1434 * than the air would allow).
1436 static constexpr float log10_decaygain{-3.0f/*std::log10(ReverbDecayGain)*/};
1437 const float absorb_dist{log10_decaygain / std::log10(airAbsorption)};
1438 DecayDistance.HF = minf(absorb_dist, DecayDistance.HF);
1442 const float baseAttn = calc_attenuation(Distance, props->RefDistance,
1443 props->RolloffFactor);
1445 /* Apply a decay-time transformation to the wet path, based on the
1446 * source distance. The initial decay of the reverb effect is
1447 * calculated and applied to the wet path.
1449 const float fact{distance_base / DecayDistance.Base};
1450 const float gain{std::pow(ReverbDecayGain, fact)*(1.0f-baseAttn) + baseAttn};
1451 WetGain[i].Base *= gain;
1453 if(gain > 0.0f)
1455 const float hffact{distance_base / DecayDistance.HF};
1456 const float gainhf{std::pow(ReverbDecayGain, hffact)*(1.0f-baseAttn) + baseAttn};
1457 WetGain[i].HF *= minf(gainhf/gain, 1.0f);
1458 const float lffact{distance_base / DecayDistance.LF};
1459 const float gainlf{std::pow(ReverbDecayGain, lffact)*(1.0f-baseAttn) + baseAttn};
1460 WetGain[i].LF *= minf(gainlf/gain, 1.0f);
1466 /* Initial source pitch */
1467 float Pitch{props->Pitch};
1469 /* Calculate velocity-based doppler effect */
1470 float DopplerFactor{props->DopplerFactor * context->mParams.DopplerFactor};
1471 if(DopplerFactor > 0.0f)
1473 const alu::Vector &lvelocity = context->mParams.Velocity;
1474 float vss{Velocity.dot_product(ToSource) * -DopplerFactor};
1475 float vls{lvelocity.dot_product(ToSource) * -DopplerFactor};
1477 const float SpeedOfSound{context->mParams.SpeedOfSound};
1478 if(!(vls < SpeedOfSound))
1480 /* Listener moving away from the source at the speed of sound.
1481 * Sound waves can't catch it.
1483 Pitch = 0.0f;
1485 else if(!(vss < SpeedOfSound))
1487 /* Source moving toward the listener at the speed of sound. Sound
1488 * waves bunch up to extreme frequencies.
1490 Pitch = std::numeric_limits<float>::infinity();
1492 else
1494 /* Source and listener movement is nominal. Calculate the proper
1495 * doppler shift.
1497 Pitch *= (SpeedOfSound-vls) / (SpeedOfSound-vss);
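/* For instance, with a SpeedOfSound of 343.3 and a source closing on a
* stationary listener at 34.33 units per second (vss=34.33, vls=0), the
* factor is 343.3/308.97, roughly an 11% pitch increase.
*/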
1501 /* Adjust pitch based on the buffer and output frequencies, and calculate
1502 * fixed-point stepping value.
1504 Pitch *= static_cast<float>(voice->mFrequency) / static_cast<float>(Device->Frequency);
1505 if(Pitch > float{MaxPitch})
1506 voice->mStep = MaxPitch<<MixerFracBits;
1507 else
1508 voice->mStep = maxu(fastf2u(Pitch * MixerFracOne), 1);
1509 voice->mResampler = PrepareResampler(props->mResampler, voice->mStep, &voice->mResampleState);
1511 float spread{0.0f};
1512 if(props->Radius > Distance)
1513 spread = al::numbers::pi_v<float>*2.0f - Distance/props->Radius*al::numbers::pi_v<float>;
1514 else if(Distance > 0.0f)
1515 spread = std::asin(props->Radius/Distance) * 2.0f;
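/* e.g. a Radius of 1 seen from a Distance of 2 subtends 2*asin(0.5) = 60
* degrees, giving a spread of about 1.047 radians.
*/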
1517 CalcPanningAndFilters(voice, ToSource[0]*XScale, ToSource[1]*YScale, ToSource[2]*ZScale,
1518 Distance*context->mParams.MetersPerUnit, spread, DryGain, WetGain, SendSlots, props,
1519 context->mParams, Device);
1522 void CalcSourceParams(Voice *voice, ContextBase *context, bool force)
1524 VoicePropsItem *props{voice->mUpdate.exchange(nullptr, std::memory_order_acq_rel)};
1525 if(!props && !force) return;
1527 if(props)
1529 voice->mProps = *props;
1531 AtomicReplaceHead(context->mFreeVoiceProps, props);
1534 if((voice->mProps.DirectChannels != DirectMode::Off && voice->mFmtChannels != FmtMono
1535 && !IsAmbisonic(voice->mFmtChannels))
1536 || voice->mProps.mSpatializeMode == SpatializeMode::Off
1537 || (voice->mProps.mSpatializeMode==SpatializeMode::Auto && voice->mFmtChannels != FmtMono))
1538 CalcNonAttnSourceParams(voice, &voice->mProps, context);
1539 else
1540 CalcAttnSourceParams(voice, &voice->mProps, context);
1544 void SendSourceStateEvent(ContextBase *context, uint id, VChangeState state)
1546 RingBuffer *ring{context->mAsyncEvents.get()};
1547 auto evt_vec = ring->getWriteVector();
1548 if(evt_vec.first.len < 1) return;
1550 AsyncEvent *evt{al::construct_at(reinterpret_cast<AsyncEvent*>(evt_vec.first.buf),
1551 AsyncEvent::SourceStateChange)};
1552 evt->u.srcstate.id = id;
1553 switch(state)
1555 case VChangeState::Reset:
1556 evt->u.srcstate.state = AsyncEvent::SrcState::Reset;
1557 break;
1558 case VChangeState::Stop:
1559 evt->u.srcstate.state = AsyncEvent::SrcState::Stop;
1560 break;
1561 case VChangeState::Play:
1562 evt->u.srcstate.state = AsyncEvent::SrcState::Play;
1563 break;
1564 case VChangeState::Pause:
1565 evt->u.srcstate.state = AsyncEvent::SrcState::Pause;
1566 break;
1567 /* Shouldn't happen. */
1568 case VChangeState::Restart:
1569 ASSUME(0);
1572 ring->writeAdvance(1);
1575 void ProcessVoiceChanges(ContextBase *ctx)
1577 VoiceChange *cur{ctx->mCurrentVoiceChange.load(std::memory_order_acquire)};
1578 VoiceChange *next{cur->mNext.load(std::memory_order_acquire)};
1579 if(!next) return;
1581 const uint enabledevt{ctx->mEnabledEvts.load(std::memory_order_acquire)};
1582 do {
1583 cur = next;
1585 bool sendevt{false};
1586 if(cur->mState == VChangeState::Reset || cur->mState == VChangeState::Stop)
1588 if(Voice *voice{cur->mVoice})
1590 voice->mCurrentBuffer.store(nullptr, std::memory_order_relaxed);
1591 voice->mLoopBuffer.store(nullptr, std::memory_order_relaxed);
1592 /* A source ID indicates the voice was playing or paused, which
1593 * gets a reset/stop event.
1595 sendevt = voice->mSourceID.exchange(0u, std::memory_order_relaxed) != 0u;
1596 Voice::State oldvstate{Voice::Playing};
1597 voice->mPlayState.compare_exchange_strong(oldvstate, Voice::Stopping,
1598 std::memory_order_relaxed, std::memory_order_acquire);
1599 voice->mPendingChange.store(false, std::memory_order_release);
1601 /* Reset state change events are always sent, even if the voice is
1602 * already stopped or even if there is no voice.
1604 sendevt |= (cur->mState == VChangeState::Reset);
1606 else if(cur->mState == VChangeState::Pause)
1608 Voice *voice{cur->mVoice};
1609 Voice::State oldvstate{Voice::Playing};
1610 sendevt = voice->mPlayState.compare_exchange_strong(oldvstate, Voice::Stopping,
1611 std::memory_order_release, std::memory_order_acquire);
1613 else if(cur->mState == VChangeState::Play)
1615 /* NOTE: When playing a voice, sending a source state change event
1616 * depends on whether there's an old voice to stop and if that stop is
1617 * successful. If there is no old voice, a playing event is always
1618 * sent. If there is an old voice, an event is sent only if the
1619 * voice is already stopped.
1621 if(Voice *oldvoice{cur->mOldVoice})
1623 oldvoice->mCurrentBuffer.store(nullptr, std::memory_order_relaxed);
1624 oldvoice->mLoopBuffer.store(nullptr, std::memory_order_relaxed);
1625 oldvoice->mSourceID.store(0u, std::memory_order_relaxed);
1626 Voice::State oldvstate{Voice::Playing};
1627 sendevt = !oldvoice->mPlayState.compare_exchange_strong(oldvstate, Voice::Stopping,
1628 std::memory_order_relaxed, std::memory_order_acquire);
1629 oldvoice->mPendingChange.store(false, std::memory_order_release);
1631 else
1632 sendevt = true;
1634 Voice *voice{cur->mVoice};
1635 voice->mPlayState.store(Voice::Playing, std::memory_order_release);
1637 else if(cur->mState == VChangeState::Restart)
1639 /* Restarting a voice never sends a source change event. */
1640 Voice *oldvoice{cur->mOldVoice};
1641 oldvoice->mCurrentBuffer.store(nullptr, std::memory_order_relaxed);
1642 oldvoice->mLoopBuffer.store(nullptr, std::memory_order_relaxed);
1643 /* If there's no sourceID, the old voice finished so don't start
1644 * the new one at its new offset.
1646 if(oldvoice->mSourceID.exchange(0u, std::memory_order_relaxed) != 0u)
1648 /* Otherwise, set the voice to stopping if it's not already (it
1649 * might already be, if paused), and play the new voice as
1650 * appropriate.
1652 Voice::State oldvstate{Voice::Playing};
1653 oldvoice->mPlayState.compare_exchange_strong(oldvstate, Voice::Stopping,
1654 std::memory_order_relaxed, std::memory_order_acquire);
1656 Voice *voice{cur->mVoice};
1657 voice->mPlayState.store((oldvstate == Voice::Playing) ? Voice::Playing
1658 : Voice::Stopped, std::memory_order_release);
1660 oldvoice->mPendingChange.store(false, std::memory_order_release);
1662 if(sendevt && (enabledevt&AsyncEvent::SourceStateChange))
1663 SendSourceStateEvent(ctx, cur->mSourceID, cur->mState);
1665 next = cur->mNext.load(std::memory_order_acquire);
1666 } while(next);
1667 ctx->mCurrentVoiceChange.store(cur, std::memory_order_release);
1670 void ProcessParamUpdates(ContextBase *ctx, const EffectSlotArray &slots,
1671 const al::span<Voice*> voices)
1673 ProcessVoiceChanges(ctx);
1675 IncrementRef(ctx->mUpdateCount);
1676 if LIKELY(!ctx->mHoldUpdates.load(std::memory_order_acquire))
1678 bool force{CalcContextParams(ctx)};
1679 auto sorted_slots = const_cast<EffectSlot**>(slots.data() + slots.size());
1680 for(EffectSlot *slot : slots)
1681 force |= CalcEffectSlotParams(slot, sorted_slots, ctx);
1683 for(Voice *voice : voices)
1685 /* Only update voices that have a source. */
1686 if(voice->mSourceID.load(std::memory_order_relaxed) != 0)
1687 CalcSourceParams(voice, ctx, force);
1690 IncrementRef(ctx->mUpdateCount);
1693 void ProcessContexts(DeviceBase *device, const uint SamplesToDo)
1695 ASSUME(SamplesToDo > 0);
1697 for(ContextBase *ctx : *device->mContexts.load(std::memory_order_acquire))
1699 const EffectSlotArray &auxslots = *ctx->mActiveAuxSlots.load(std::memory_order_acquire);
1700 const al::span<Voice*> voices{ctx->getVoicesSpanAcquired()};
1702 /* Process pending property updates for objects on the context. */
1703 ProcessParamUpdates(ctx, auxslots, voices);
1705 /* Clear auxiliary effect slot mixing buffers. */
1706 for(EffectSlot *slot : auxslots)
1708 for(auto &buffer : slot->Wet.Buffer)
1709 buffer.fill(0.0f);
1712 /* Process voices that have a playing source. */
1713 for(Voice *voice : voices)
1715 const Voice::State vstate{voice->mPlayState.load(std::memory_order_acquire)};
1716 if(vstate != Voice::Stopped && vstate != Voice::Pending)
1717 voice->mix(vstate, ctx, SamplesToDo);
1720 /* Process effects. */
1721 if(const size_t num_slots{auxslots.size()})
1723 auto slots = auxslots.data();
1724 auto slots_end = slots + num_slots;
1726 /* Sort the slots into extra storage, so that effect slots come
1727 * before their effect slot target (or their targets' target).
1729 const al::span<EffectSlot*> sorted_slots{const_cast<EffectSlot**>(slots_end),
1730 num_slots};
1731 /* Skip sorting if it has already been done. */
1732 if(!sorted_slots[0])
1734 /* First, copy the slots to the sorted list, then partition the
1735 * sorted list so that all slots without a target slot go to
1736 * the end.
1738 std::copy(slots, slots_end, sorted_slots.begin());
1739 auto split_point = std::partition(sorted_slots.begin(), sorted_slots.end(),
1740 [](const EffectSlot *slot) noexcept -> bool
1741 { return slot->Target != nullptr; });
1742 /* There must be at least one slot without a slot target. */
1743 assert(split_point != sorted_slots.end());
1745 /* Simple case: no more than 1 slot has a target slot. Either
1746 * all slots go right to the output, or the remaining one must
1747 * target an already-partitioned slot.
1749 if(split_point - sorted_slots.begin() > 1)
1751 /* At least two slots target other slots. Starting from the
1752 * back of the sorted list, continue partitioning the front
1753 * of the list given each target until all targets are
1754 * accounted for. This ensures all slots without a target
1755 * go last, all slots directly targeting those last slots
1756 * go second-to-last, all slots directly targeting those
1757 * second-last slots go third-to-last, etc.
1759 auto next_target = sorted_slots.end();
1760 do {
1761 /* This shouldn't happen, but if there are unsorted slots
1762 * left that don't target any sorted slots, they can't
1763 * contribute to the output, so leave them.
1765 if UNLIKELY(next_target == split_point)
1766 break;
1768 --next_target;
1769 split_point = std::partition(sorted_slots.begin(), split_point,
1770 [next_target](const EffectSlot *slot) noexcept -> bool
1771 { return slot->Target != *next_target; });
1772 } while(split_point - sorted_slots.begin() > 1);
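/* For example, with slots A and B where A targets B and B feeds the output
* directly, the sorted order is [A, B]; processing front to back then
* delivers A's processed output to B before B itself is processed. */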
1776 for(const EffectSlot *slot : sorted_slots)
1778 EffectState *state{slot->mEffectState};
1779 state->process(SamplesToDo, slot->Wet.Buffer, state->mOutTarget);
1783 /* Signal the event handler if there are any events to read. */
1784 RingBuffer *ring{ctx->mAsyncEvents.get()};
1785 if(ring->readSpace() > 0)
1786 ctx->mEventSem.post();
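/* Applies per-channel delay and gain to compensate for mismatched speaker
* distances. Output samples are exchanged with samples held in the channel's
* delay buffer, delaying the channel by Length samples, and the compensating
* gain is then applied. */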
1791 void ApplyDistanceComp(const al::span<FloatBufferLine> Samples, const size_t SamplesToDo,
1792 const DistanceComp::ChanData *distcomp)
1794 ASSUME(SamplesToDo > 0);
1796 for(auto &chanbuffer : Samples)
1798 const float gain{distcomp->Gain};
1799 const size_t base{distcomp->Length};
1800 float *distbuf{al::assume_aligned<16>(distcomp->Buffer)};
1801 ++distcomp;
1803 if(base < 1)
1804 continue;
1806 float *inout{al::assume_aligned<16>(chanbuffer.data())};
1807 auto inout_end = inout + SamplesToDo;
1808 if LIKELY(SamplesToDo >= base)
1810 auto delay_end = std::rotate(inout, inout_end - base, inout_end);
1811 std::swap_ranges(inout, delay_end, distbuf);
1813 else
1815 auto delay_start = std::swap_ranges(inout, inout_end, distbuf);
1816 std::rotate(distbuf, delay_start, distbuf + base);
1818 std::transform(inout, inout_end, inout, std::bind(std::multiplies<float>{}, _1, gain));
1822 void ApplyDither(const al::span<FloatBufferLine> Samples, uint *dither_seed,
1823 const float quant_scale, const size_t SamplesToDo)
1825 ASSUME(SamplesToDo > 0);
1827 /* Dithering. Generate noise as the difference of two uniform random values
1828 * (a triangular distribution spanning -1 to +1) and add it to the sample
1829 * values, after scaling up to the desired quantization depth and before rounding.
1831 const float invscale{1.0f / quant_scale};
1832 uint seed{*dither_seed};
1833 auto dither_sample = [&seed,invscale,quant_scale](const float sample) noexcept -> float
1835 float val{sample * quant_scale};
1836 uint rng0{dither_rng(&seed)};
1837 uint rng1{dither_rng(&seed)};
1838 val += static_cast<float>(rng0*(1.0/UINT_MAX) - rng1*(1.0/UINT_MAX));
1839 return fast_roundf(val) * invscale;
1841 for(FloatBufferLine &inout : Samples)
1842 std::transform(inout.begin(), inout.begin()+SamplesToDo, inout.begin(), dither_sample);
1843 *dither_seed = seed;
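/* The caller passes the device's dither depth as quant_scale (for instance,
* a scale of 32768 corresponds to 16-bit quantization), so the added noise
* spans roughly +/-1 LSB at the target sample depth. */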
1847 /* Base template left undefined. Should be marked =delete, but Clang 3.8.1
1848 * chokes on that given the inline specializations.
1850 template<typename T>
1851 inline T SampleConv(float) noexcept;
1853 template<> inline float SampleConv(float val) noexcept
1854 { return val; }
1855 template<> inline int32_t SampleConv(float val) noexcept
1857 /* Floats have a 23-bit mantissa, plus an implied 1 bit and a sign bit.
1858 * This means a normalized float has at most 25 bits of signed precision.
1859 * When scaling and clamping for a signed 32-bit integer, the following
1860 * values are the best a float can give; 2147483520 is the largest float below 2^31 (floats of that magnitude are spaced 128 apart).
1862 return fastf2i(clampf(val*2147483648.0f, -2147483648.0f, 2147483520.0f));
1864 template<> inline int16_t SampleConv(float val) noexcept
1865 { return static_cast<int16_t>(fastf2i(clampf(val*32768.0f, -32768.0f, 32767.0f))); }
1866 template<> inline int8_t SampleConv(float val) noexcept
1867 { return static_cast<int8_t>(fastf2i(clampf(val*128.0f, -128.0f, 127.0f))); }
1869 /* Define unsigned output variations. */
1870 template<> inline uint32_t SampleConv(float val) noexcept
1871 { return static_cast<uint32_t>(SampleConv<int32_t>(val)) + 2147483648u; }
1872 template<> inline uint16_t SampleConv(float val) noexcept
1873 { return static_cast<uint16_t>(SampleConv<int16_t>(val) + 32768); }
1874 template<> inline uint8_t SampleConv(float val) noexcept
1875 { return static_cast<uint8_t>(SampleConv<int8_t>(val) + 128); }
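/* The unsigned variants bias the signed result into offset-binary, as
* unsigned PCM formats expect. */

/* Interleaves and converts the mixed float channel lines to the device
* sample type, writing FrameStep-strided frames starting at frame Offset.
* Any output channels beyond the mixed channel count are filled with
* silence. */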
1877 template<DevFmtType T>
1878 void Write(const al::span<const FloatBufferLine> InBuffer, void *OutBuffer, const size_t Offset,
1879 const size_t SamplesToDo, const size_t FrameStep)
1881 ASSUME(FrameStep > 0);
1882 ASSUME(SamplesToDo > 0);
1884 DevFmtType_t<T> *outbase{static_cast<DevFmtType_t<T>*>(OutBuffer) + Offset*FrameStep};
1885 size_t c{0};
1886 for(const FloatBufferLine &inbuf : InBuffer)
1888 DevFmtType_t<T> *out{outbase++};
1889 auto conv_sample = [FrameStep,&out](const float s) noexcept -> void
1891 *out = SampleConv<DevFmtType_t<T>>(s);
1892 out += FrameStep;
1894 std::for_each(inbuf.begin(), inbuf.begin()+SamplesToDo, conv_sample);
1895 ++c;
1897 if(const size_t extra{FrameStep - c})
1899 const auto silence = SampleConv<DevFmtType_t<T>>(0.0f);
1900 for(size_t i{0};i < SamplesToDo;++i)
1902 std::fill_n(outbase, extra, silence);
1903 outbase += FrameStep;
1908 } // namespace
1910 uint DeviceBase::renderSamples(const uint numSamples)
1912 const uint samplesToDo{minu(numSamples, BufferLineSize)};
1914 /* Clear main mixing buffers. */
1915 for(FloatBufferLine &buffer : MixBuffer)
1916 buffer.fill(0.0f);
1918 /* Increment the mix count at the start (lsb should now be 1). */
1919 IncrementRef(MixCount);
1921 /* Process and mix each context's sources and effects. */
1922 ProcessContexts(this, samplesToDo);
1924 /* Increment the clock time. Every second's worth of samples is converted
1925 * and added to the clock base so that large sample counts don't overflow
1926 * during conversion. This also guarantees a stable conversion.
1928 SamplesDone += samplesToDo;
1929 ClockBase += std::chrono::seconds{SamplesDone / Frequency};
1930 SamplesDone %= Frequency;
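/* E.g. at a 48kHz mixing rate, every 48000 accumulated samples advance
* ClockBase by one second and SamplesDone wraps back to the remainder, so
* the later samples-to-time conversion stays small and overflow-free. */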
1932 /* Increment the mix count at the end (lsb should now be 0). */
1933 IncrementRef(MixCount);
1935 /* Apply any needed post-processing to finalize the Dry mix into the RealOut
1936 * (Ambisonic decode, UHJ encode, etc.).
1938 postProcess(samplesToDo);
1940 /* Apply compression, limiting sample amplitude if needed or desired. */
1941 if(Limiter) Limiter->process(samplesToDo, RealOut.Buffer.data());
1943 /* Apply delays and attenuation for mismatched speaker distances. */
1944 if(ChannelDelays)
1945 ApplyDistanceComp(RealOut.Buffer, samplesToDo, ChannelDelays->mChannels.data());
1947 /* Apply dithering. The compressor should have left enough headroom for the
1948 * dither noise to not saturate.
1950 if(DitherDepth > 0.0f)
1951 ApplyDither(RealOut.Buffer, &DitherSeed, DitherDepth, samplesToDo);
1953 return samplesToDo;
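/* Renders into caller-provided per-channel (non-interleaved) float buffers,
* copying each RealOut channel line to the corresponding output pointer. */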
1956 void DeviceBase::renderSamples(const al::span<float*> outBuffers, const uint numSamples)
1958 FPUCtl mixer_mode{};
1959 uint total{0};
1960 while(const uint todo{numSamples - total})
1962 const uint samplesToDo{renderSamples(todo)};
1964 auto *srcbuf = RealOut.Buffer.data();
1965 for(auto *dstbuf : outBuffers)
1967 std::copy_n(srcbuf->data(), samplesToDo, dstbuf + total);
1968 ++srcbuf;
1971 total += samplesToDo;
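/* Renders into a single interleaved buffer of the device's sample type,
* with frameStep samples per frame. A null outBuffer still advances the
* mix; the rendered samples are simply discarded. */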
1975 void DeviceBase::renderSamples(void *outBuffer, const uint numSamples, const size_t frameStep)
1977 FPUCtl mixer_mode{};
1978 uint total{0};
1979 while(const uint todo{numSamples - total})
1981 const uint samplesToDo{renderSamples(todo)};
1983 if LIKELY(outBuffer)
1985 /* Finally, interleave and convert samples, writing to the device's
1986 * output buffer.
1988 switch(FmtType)
1990 #define HANDLE_WRITE(T) case T: \
1991 Write<T>(RealOut.Buffer, outBuffer, total, samplesToDo, frameStep); break;
1992 HANDLE_WRITE(DevFmtByte)
1993 HANDLE_WRITE(DevFmtUByte)
1994 HANDLE_WRITE(DevFmtShort)
1995 HANDLE_WRITE(DevFmtUShort)
1996 HANDLE_WRITE(DevFmtInt)
1997 HANDLE_WRITE(DevFmtUInt)
1998 HANDLE_WRITE(DevFmtFloat)
1999 #undef HANDLE_WRITE
2003 total += samplesToDo;
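/* Marks the device as disconnected (only the first call has any effect) and
* notifies each context: a Disconnected event is queued for contexts
* listening for it, and unless a context opts to keep voices playing on
* disconnect, all of its voices are stopped. */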
2007 void DeviceBase::handleDisconnect(const char *msg, ...)
2009 if(!Connected.exchange(false, std::memory_order_acq_rel))
2010 return;
2012 AsyncEvent evt{AsyncEvent::Disconnected};
2014 va_list args;
2015 va_start(args, msg);
2016 int msglen{vsnprintf(evt.u.disconnect.msg, sizeof(evt.u.disconnect.msg), msg, args)};
2017 va_end(args);
2019 if(msglen < 0 || static_cast<size_t>(msglen) >= sizeof(evt.u.disconnect.msg))
2020 evt.u.disconnect.msg[sizeof(evt.u.disconnect.msg)-1] = 0;
2022 IncrementRef(MixCount);
2023 for(ContextBase *ctx : *mContexts.load())
2025 const uint enabledevt{ctx->mEnabledEvts.load(std::memory_order_acquire)};
2026 if((enabledevt&AsyncEvent::Disconnected))
2028 RingBuffer *ring{ctx->mAsyncEvents.get()};
2029 auto evt_data = ring->getWriteVector().first;
2030 if(evt_data.len > 0)
2032 al::construct_at(reinterpret_cast<AsyncEvent*>(evt_data.buf), evt);
2033 ring->writeAdvance(1);
2034 ctx->mEventSem.post();
2038 if(!ctx->mStopVoicesOnDisconnect)
2040 ProcessVoiceChanges(ctx);
2041 continue;
2044 auto voicelist = ctx->getVoicesSpanAcquired();
2045 auto stop_voice = [](Voice *voice) -> void
2047 voice->mCurrentBuffer.store(nullptr, std::memory_order_relaxed);
2048 voice->mLoopBuffer.store(nullptr, std::memory_order_relaxed);
2049 voice->mSourceID.store(0u, std::memory_order_relaxed);
2050 voice->mPlayState.store(Voice::Stopped, std::memory_order_release);
2052 std::for_each(voicelist.begin(), voicelist.end(), stop_voice);
2054 IncrementRef(MixCount);