#include "config.h"

#include <arm_neon.h>

#include "AL/al.h"
#include "AL/alc.h"
#include "alMain.h"
#include "alu.h"
#include "hrtf.h"

#include "mixer_defs.h"

const ALfloat *Resample_lerp32_Neon(const InterpState* UNUSED(state),
  const ALfloat *restrict src, ALsizei frac, ALint increment,
  ALfloat *restrict dst, ALsizei numsamples)
{
    const int32x4_t increment4 = vdupq_n_s32(increment*4);
    const float32x4_t fracOne4 = vdupq_n_f32(1.0f/FRACTIONONE);
    const int32x4_t fracMask4 = vdupq_n_s32(FRACTIONMASK);
    alignas(16) ALint pos_[4];
    alignas(16) ALsizei frac_[4];
    int32x4_t frac4, pos4;
    ALint pos;
    ALsizei i;

    InitiatePositionArrays(frac, increment, frac_, pos_, 4);

    frac4 = vld1q_s32(frac_);
    pos4 = vld1q_s32(pos_);

    for(i = 0;numsamples-i > 3;i += 4)
    {
        const float32x4_t val1 = (float32x4_t){src[pos_[0]], src[pos_[1]], src[pos_[2]], src[pos_[3]]};
        const float32x4_t val2 = (float32x4_t){src[pos_[0]+1], src[pos_[1]+1], src[pos_[2]+1], src[pos_[3]+1]};

        /* val1 + (val2-val1)*mu */
        const float32x4_t r0 = vsubq_f32(val2, val1);
        const float32x4_t mu = vmulq_f32(vcvtq_f32_s32(frac4), fracOne4);
        const float32x4_t out = vmlaq_f32(val1, mu, r0);

        vst1q_f32(&dst[i], out);

        frac4 = vaddq_s32(frac4, increment4);
        pos4 = vaddq_s32(pos4, vshrq_n_s32(frac4, FRACTIONBITS));
        frac4 = vandq_s32(frac4, fracMask4);

        vst1q_s32(pos_, pos4);
    }

    if(i < numsamples)
    {
        /* NOTE: These four elements represent the position *after* the last
         * four samples, so the lowest element is the next position to
         * resample.
         */
        pos = pos_[0];
        frac = vgetq_lane_s32(frac4, 0);
        do {
            dst[i] = lerp(src[pos], src[pos+1], frac * (1.0f/FRACTIONONE));

            frac += increment;
            pos  += frac>>FRACTIONBITS;
            frac &= FRACTIONMASK;
        } while(++i < numsamples);
    }
    return dst;
}
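
/* For reference, the scalar form of the resampler above. The source is
 * walked with a fixed-point position: the integer sample index lives in
 * `pos` and a FRACTIONBITS-wide fraction in `frac`, so each step carries
 * whole-sample overflow from `frac` into `pos`. A minimal sketch, not part
 * of the build:
 *
 *     for(i = 0;i < numsamples;i++)
 *     {
 *         dst[i] = lerp(src[pos], src[pos+1], frac * (1.0f/FRACTIONONE));
 *         frac += increment;
 *         pos  += frac>>FRACTIONBITS;
 *         frac &= FRACTIONMASK;
 *     }
 */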

const ALfloat *Resample_fir4_32_Neon(const InterpState* UNUSED(state),
  const ALfloat *restrict src, ALsizei frac, ALint increment,
  ALfloat *restrict dst, ALsizei numsamples)
{
    const int32x4_t increment4 = vdupq_n_s32(increment*4);
    const int32x4_t fracMask4 = vdupq_n_s32(FRACTIONMASK);
    alignas(16) ALint pos_[4];
    alignas(16) ALsizei frac_[4];
    int32x4_t frac4, pos4;
    ALint pos;
    ALsizei i;

    InitiatePositionArrays(frac, increment, frac_, pos_, 4);

    frac4 = vld1q_s32(frac_);
    pos4 = vld1q_s32(pos_);

    --src;
    for(i = 0;numsamples-i > 3;i += 4)
    {
        const float32x4_t val0 = vld1q_f32(&src[pos_[0]]);
        const float32x4_t val1 = vld1q_f32(&src[pos_[1]]);
        const float32x4_t val2 = vld1q_f32(&src[pos_[2]]);
        const float32x4_t val3 = vld1q_f32(&src[pos_[3]]);
        float32x4_t k0 = vld1q_f32(sinc4Tab[frac_[0]]);
        float32x4_t k1 = vld1q_f32(sinc4Tab[frac_[1]]);
        float32x4_t k2 = vld1q_f32(sinc4Tab[frac_[2]]);
        float32x4_t k3 = vld1q_f32(sinc4Tab[frac_[3]]);
        float32x4_t out;

        /* Multiply the filter coefficients with the source samples, then
         * reduce each product vector to its sum with pairwise adds.
         */
        k0 = vmulq_f32(k0, val0);
        k1 = vmulq_f32(k1, val1);
        k2 = vmulq_f32(k2, val2);
        k3 = vmulq_f32(k3, val3);
        k0 = vcombine_f32(vpadd_f32(vget_low_f32(k0), vget_high_f32(k0)),
                          vpadd_f32(vget_low_f32(k1), vget_high_f32(k1)));
        k2 = vcombine_f32(vpadd_f32(vget_low_f32(k2), vget_high_f32(k2)),
                          vpadd_f32(vget_low_f32(k3), vget_high_f32(k3)));
        out = vcombine_f32(vpadd_f32(vget_low_f32(k0), vget_high_f32(k0)),
                           vpadd_f32(vget_low_f32(k2), vget_high_f32(k2)));

        vst1q_f32(&dst[i], out);

        frac4 = vaddq_s32(frac4, increment4);
        pos4 = vaddq_s32(pos4, vshrq_n_s32(frac4, FRACTIONBITS));
        frac4 = vandq_s32(frac4, fracMask4);

        vst1q_s32(pos_, pos4);
        vst1q_s32(frac_, frac4);
    }

    if(i < numsamples)
    {
        /* NOTE: These four elements represent the position *after* the last
         * four samples, so the lowest element is the next position to
         * resample.
         */
        pos = pos_[0];
        frac = frac_[0];
        do {
            dst[i] = resample_fir4(src[pos], src[pos+1], src[pos+2], src[pos+3], frac);

            frac += increment;
            pos  += frac>>FRACTIONBITS;
            frac &= FRACTIONMASK;
        } while(++i < numsamples);
    }
    return dst;
}
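
/* Note on the reduction above: each of the four outputs is a 4-tap dot
 * product. For lane n, in the loop's own names:
 *
 *     dst[i+n] = sinc4Tab[frac_[n]][0]*src[pos_[n]  ] +
 *                sinc4Tab[frac_[n]][1]*src[pos_[n]+1] +
 *                sinc4Tab[frac_[n]][2]*src[pos_[n]+2] +
 *                sinc4Tab[frac_[n]][3]*src[pos_[n]+3];
 *
 * Two rounds of vpadd_f32 perform all four horizontal sums at once, one
 * dot product per lane of `out`.
 */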

const ALfloat *Resample_bsinc32_Neon(const InterpState *state,
  const ALfloat *restrict src, ALsizei frac, ALint increment,
  ALfloat *restrict dst, ALsizei dstlen)
{
    const float32x4_t sf4 = vdupq_n_f32(state->bsinc.sf);
    const ALsizei m = state->bsinc.m;
    const ALfloat *fil, *scd, *phd, *spd;
    float32x4_t r4;
    ALfloat pf;
    ALsizei pi, i, j;

    src += state->bsinc.l;
    for(i = 0;i < dstlen;i++)
    {
        // Calculate the phase index and factor.
#define FRAC_PHASE_BITDIFF (FRACTIONBITS-BSINC_PHASE_BITS)
        pi = frac >> FRAC_PHASE_BITDIFF;
        pf = (frac & ((1<<FRAC_PHASE_BITDIFF)-1)) * (1.0f/(1<<FRAC_PHASE_BITDIFF));
#undef FRAC_PHASE_BITDIFF
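        /* The fractional position is split in two here: the top
         * BSINC_PHASE_BITS of `frac` select one of the precomputed filter
         * phases (pi), and the remaining low bits become a 0..1 factor (pf)
         * for interpolating between adjacent phases. For example, assuming
         * FRACTIONBITS=12 and BSINC_PHASE_BITS=4, frac=0x5a3 yields pi=5
         * and pf=0xa3/256.
         */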

        fil = ASSUME_ALIGNED(state->bsinc.coeffs[pi].filter, 16);
        scd = ASSUME_ALIGNED(state->bsinc.coeffs[pi].scDelta, 16);
        phd = ASSUME_ALIGNED(state->bsinc.coeffs[pi].phDelta, 16);
        spd = ASSUME_ALIGNED(state->bsinc.coeffs[pi].spDelta, 16);

        // Apply the scale and phase interpolated filter.
        r4 = vdupq_n_f32(0.0f);
        {
            const float32x4_t pf4 = vdupq_n_f32(pf);
            for(j = 0;j < m;j += 4)
            {
                /* f = ((fil + sf*scd) + pf*(phd + sf*spd)) */
                const float32x4_t f4 = vmlaq_f32(
                    vmlaq_f32(vld1q_f32(&fil[j]), sf4, vld1q_f32(&scd[j])),
                    pf4, vmlaq_f32(vld1q_f32(&phd[j]), sf4, vld1q_f32(&spd[j]))
                );
                /* r += f*src */
                r4 = vmlaq_f32(r4, f4, vld1q_f32(&src[j]));
            }
        }
        r4 = vaddq_f32(r4, vcombine_f32(vrev64_f32(vget_high_f32(r4)),
                                        vrev64_f32(vget_low_f32(r4))));
        dst[i] = vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);

        frac += increment;
        src  += frac>>FRACTIONBITS;
        frac &= FRACTIONMASK;
    }
    return dst;
}
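
/* The per-sample filter above is, in scalar form (a sketch, not part of
 * the build):
 *
 *     r = 0.0f;
 *     for(j = 0;j < m;j++)
 *         r += ((fil[j] + sf*scd[j]) + pf*(phd[j] + sf*spd[j])) * src[j];
 *     dst[i] = r;
 *
 * The vrev64/vcombine shuffle at the end is a horizontal sum: it adds each
 * lane of r4 to its mirror, then vadd_f32 folds the two remaining pairs so
 * lane 0 holds the total of all four partial sums.
 */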

static inline void ApplyCoeffs(ALsizei Offset, ALfloat (*restrict Values)[2],
                               const ALsizei IrSize,
                               const ALfloat (*restrict Coeffs)[2],
                               ALfloat left, ALfloat right)
{
    ALsizei c;
    float32x4_t leftright4;
    {
        float32x2_t leftright2 = vdup_n_f32(0.0);
        leftright2 = vset_lane_f32(left, leftright2, 0);
        leftright2 = vset_lane_f32(right, leftright2, 1);
        leftright4 = vcombine_f32(leftright2, leftright2);
    }

    Values = ASSUME_ALIGNED(Values, 16);
    Coeffs = ASSUME_ALIGNED(Coeffs, 16);
    for(c = 0;c < IrSize;c += 2)
    {
        const ALsizei o0 = (Offset+c)&HRIR_MASK;
        const ALsizei o1 = (o0+1)&HRIR_MASK;
        float32x4_t vals = vcombine_f32(vld1_f32((float32_t*)&Values[o0][0]),
                                        vld1_f32((float32_t*)&Values[o1][0]));
        float32x4_t coefs = vld1q_f32((float32_t*)&Coeffs[c][0]);

        vals = vmlaq_f32(vals, coefs, leftright4);

        vst1_f32((float32_t*)&Values[o0][0], vget_low_f32(vals));
        vst1_f32((float32_t*)&Values[o1][0], vget_high_f32(vals));
    }
}
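
/* ApplyCoeffs accumulates one input sample through the stereo HRIR:
 * Values[o] is a {left,right} accumulator pair in a ring buffer indexed
 * modulo HRIR_MASK+1, and broadcasting {left,right,left,right} into
 * leftright4 lets one vmlaq_f32 apply two taps per iteration. The scalar
 * equivalent (a sketch, not part of the build):
 *
 *     for(c = 0;c < IrSize;c++)
 *     {
 *         const ALsizei o = (Offset+c)&HRIR_MASK;
 *         Values[o][0] += Coeffs[c][0]*left;
 *         Values[o][1] += Coeffs[c][1]*right;
 *     }
 */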

#define MixHrtf MixHrtf_Neon
#define MixHrtfBlend MixHrtfBlend_Neon
#define MixDirectHrtf MixDirectHrtf_Neon
#include "mixer_inc.c"
#undef MixHrtf
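
/* mixer_inc.c carries the generic HRTF mixing loops; the defines above
 * rename its entry points to the *_Neon variants, and since it is included
 * textually it picks up the NEON ApplyCoeffs defined in this file.
 */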

void Mix_Neon(const ALfloat *data, ALsizei OutChans, ALfloat (*restrict OutBuffer)[BUFFERSIZE],
              ALfloat *CurrentGains, const ALfloat *TargetGains, ALsizei Counter, ALsizei OutPos,
              ALsizei BufferSize)
{
    ALfloat gain, delta, step;
    float32x4_t gain4;
    ALsizei c;

    data = ASSUME_ALIGNED(data, 16);
    OutBuffer = ASSUME_ALIGNED(OutBuffer, 16);

    delta = (Counter > 0) ? 1.0f/(ALfloat)Counter : 0.0f;

    for(c = 0;c < OutChans;c++)
    {
        ALsizei pos = 0;
        gain = CurrentGains[c];
        step = (TargetGains[c] - gain) * delta;
        if(fabsf(step) > FLT_EPSILON)
        {
            ALsizei minsize = mini(BufferSize, Counter);
            /* Mix with applying gain steps in aligned multiples of 4. */
            if(minsize-pos > 3)
            {
                /* Seed the four lanes with gain, gain+step, gain+2*step and
                 * gain+3*step; step4 then advances all lanes by four steps
                 * per iteration.
                 */
                float32x4_t step4;
                gain4 = vsetq_lane_f32(gain, gain4, 0);
                gain4 = vsetq_lane_f32(gain + step, gain4, 1);
                gain4 = vsetq_lane_f32(gain + step + step, gain4, 2);
                gain4 = vsetq_lane_f32(gain + step + step + step, gain4, 3);
                step4 = vdupq_n_f32(step + step + step + step);
                do {
                    const float32x4_t val4 = vld1q_f32(&data[pos]);
                    float32x4_t dry4 = vld1q_f32(&OutBuffer[c][OutPos+pos]);
                    dry4 = vmlaq_f32(dry4, val4, gain4);
                    gain4 = vaddq_f32(gain4, step4);
                    vst1q_f32(&OutBuffer[c][OutPos+pos], dry4);
                    pos += 4;
                } while(minsize-pos > 3);
                /* NOTE: gain4 now represents the next four gains after the
                 * last four mixed samples, so the lowest element represents
                 * the next gain to apply.
                 */
                gain = vgetq_lane_f32(gain4, 0);
            }
            /* Mix with applying left over gain steps that aren't aligned
             * multiples of 4.
             */
            for(;pos < minsize;pos++)
            {
                OutBuffer[c][OutPos+pos] += data[pos]*gain;
                gain += step;
            }
            if(pos == Counter)
                gain = TargetGains[c];
            CurrentGains[c] = gain;

            /* Mix until pos is aligned with 4 or the mix is done. */
            minsize = mini(BufferSize, (pos+3)&~3);
            for(;pos < minsize;pos++)
                OutBuffer[c][OutPos+pos] += data[pos]*gain;
        }

        if(!(fabsf(gain) > GAIN_SILENCE_THRESHOLD))
            continue;
        gain4 = vdupq_n_f32(gain);
        for(;BufferSize-pos > 3;pos += 4)
        {
            const float32x4_t val4 = vld1q_f32(&data[pos]);
            float32x4_t dry4 = vld1q_f32(&OutBuffer[c][OutPos+pos]);
            dry4 = vmlaq_f32(dry4, val4, gain4);
            vst1q_f32(&OutBuffer[c][OutPos+pos], dry4);
        }
        for(;pos < BufferSize;pos++)
            OutBuffer[c][OutPos+pos] += data[pos]*gain;
    }
}
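
/* The ramp above realizes gain_n = gain + n*step for sample n: a linear
 * fade from CurrentGains[c] toward TargetGains[c] over Counter samples.
 * The scalar equivalent of the stepped section (a sketch, not part of the
 * build):
 *
 *     for(pos = 0;pos < minsize;pos++)
 *     {
 *         OutBuffer[c][OutPos+pos] += data[pos]*gain;
 *         gain += step;
 *     }
 */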

void MixRow_Neon(ALfloat *OutBuffer, const ALfloat *Gains,
                 const ALfloat (*restrict data)[BUFFERSIZE],
                 ALsizei InChans, ALsizei InPos, ALsizei BufferSize)
{
    float32x4_t gain4;
    ALsizei c;

    data = ASSUME_ALIGNED(data, 16);
    OutBuffer = ASSUME_ALIGNED(OutBuffer, 16);

    for(c = 0;c < InChans;c++)
    {
        ALsizei pos = 0;
        ALfloat gain = Gains[c];
        /* The negated test also skips a NaN gain, which a plain
         * `fabsf(gain) <= GAIN_SILENCE_THRESHOLD` would let through.
         */
        if(!(fabsf(gain) > GAIN_SILENCE_THRESHOLD))
            continue;

        gain4 = vdupq_n_f32(gain);
        for(;BufferSize-pos > 3;pos += 4)
        {
            const float32x4_t val4 = vld1q_f32(&data[c][InPos+pos]);
            float32x4_t dry4 = vld1q_f32(&OutBuffer[pos]);
            dry4 = vmlaq_f32(dry4, val4, gain4);
            vst1q_f32(&OutBuffer[pos], dry4);
        }
        for(;pos < BufferSize;pos++)
            OutBuffer[pos] += data[c][InPos+pos]*gain;
    }
}