#ifndef PHASE_SHIFTER_H
#define PHASE_SHIFTER_H

#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#elif defined(HAVE_NEON)
#include <arm_neon.h>
#endif

#include <algorithm>
#include <array>
#include <complex>
#include <cstddef>
#include <memory>

#include "alcomplex.h"
#include "alspan.h"

/* Implements a wide-band +90 degree phase-shift. Note that this should be
 * given one sample less of a delay (FilterSize/2 - 1) compared to the direct
 * signal delay (FilterSize/2) to properly align.
 */
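/* An illustrative usage sketch (names here are hypothetical): to keep the two
 * paths time-aligned, the shifted path is fed input delayed by one sample less
 * than the direct path. For example, with FilterSize=128:
 *
 *   PhaseShifterT<128> shifter;
 *   // srcDelayed63 = input delayed by 128/2 - 1 = 63 samples
 *   shifter.process(al::span<float>{shifted, count}, srcDelayed63);
 *   // The direct path uses the same input delayed by 128/2 = 64 samples;
 *   // "shifted" is then +90 degrees out of phase with it.
 */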
template<size_t FilterSize>
struct PhaseShifterT {
    static_assert(FilterSize >= 16, "FilterSize needs to be at least 16");
    static_assert((FilterSize&(FilterSize-1)) == 0, "FilterSize needs to be power-of-two");

    alignas(16) std::array<float,FilterSize/2> mCoeffs{};

    /* Some notes on this filter construction.
     *
     * A wide-band phase-shift filter needs a delay to maintain linearity. A
     * Dirac impulse in the center of a time-domain buffer represents a filter
     * passing all frequencies through as-is with a pure delay. Converting that
     * to the frequency domain, adjusting the phase of each frequency bin by
     * +90 degrees, then converting back to the time domain, results in a FIR
     * filter that applies a +90 degree wide-band phase-shift.
     *
     * A particularly notable aspect of the time-domain filter response is that
     * every other coefficient is 0. This allows doubling the effective size of
     * the filter, by storing only the non-0 coefficients and double-stepping
     * over the input to apply it.
     *
     * Additionally, the resulting filter is independent of the sample rate.
     * The same filter can be applied regardless of the device's sample rate
     * and achieve the same effect.
     */
    PhaseShifterT()
    {
        using complex_d = std::complex<double>;
        constexpr size_t fft_size{FilterSize};
        constexpr size_t half_size{fft_size / 2};
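
        /* Start from a pure delay: a unit impulse placed half_size samples
         * into an otherwise zeroed buffer. */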
        auto fftBuffer = std::make_unique<complex_d[]>(fft_size);
        std::fill_n(fftBuffer.get(), fft_size, complex_d{});
        fftBuffer[half_size] = 1.0;
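
        /* Convert to the frequency domain, rotate the phase of each bin from
         * DC through Nyquist by +90 degrees (multiply by i), and fill the
         * upper half with the complex conjugate of the mirrored lower half. */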
        forward_fft({fftBuffer.get(), fft_size});
        for(size_t i{0};i < half_size+1;++i)
            fftBuffer[i] = complex_d{-fftBuffer[i].imag(), fftBuffer[i].real()};
        for(size_t i{half_size+1};i < fft_size;++i)
            fftBuffer[i] = std::conj(fftBuffer[fft_size - i]);
        inverse_fft({fftBuffer.get(), fft_size});
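
        /* Every other tap of the resulting impulse response is zero, so store
         * only the non-zero taps, stepping backward two at a time from the end
         * of the buffer, scaling each by 1/fft_size. */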
        auto fftiter = fftBuffer.get() + half_size + (FilterSize/2 - 1);
        for(float &coeff : mCoeffs)
        {
            coeff = static_cast<float>(fftiter->real() / double{fft_size});
            fftiter -= 2;
        }
    }

    void process(al::span<float> dst, const float *RESTRICT src) const;
    void processAccum(al::span<float> dst, const float *RESTRICT src) const;
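
    /* Both methods convolve the (already delayed) input in src against
     * mCoeffs, producing dst.size() phase-shifted samples; process()
     * overwrites dst while processAccum() adds to dst's existing contents. */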

#if defined(HAVE_NEON)
    /* There don't seem to be NEON intrinsics to do this kind of stipple
     * shuffling, so there are two custom methods for it.
     */
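    /* shuffle_2020(a, b) yields {a[0], a[2], b[0], b[2]} and shuffle_3131(a, b)
     * yields {a[1], a[3], b[1], b[3]}, matching the even/odd sample gathers done
     * with _mm_shuffle_ps in the SSE path. */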
    static auto shuffle_2020(float32x4_t a, float32x4_t b)
    {
        float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 0))};
        ret = vsetq_lane_f32(vgetq_lane_f32(a, 2), ret, 1);
        ret = vsetq_lane_f32(vgetq_lane_f32(b, 0), ret, 2);
        ret = vsetq_lane_f32(vgetq_lane_f32(b, 2), ret, 3);
        return ret;
    }
    static auto shuffle_3131(float32x4_t a, float32x4_t b)
    {
        float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 1))};
        ret = vsetq_lane_f32(vgetq_lane_f32(a, 3), ret, 1);
        ret = vsetq_lane_f32(vgetq_lane_f32(b, 1), ret, 2);
        ret = vsetq_lane_f32(vgetq_lane_f32(b, 3), ret, 3);
        return ret;
    }

    static auto unpacklo(float32x4_t a, float32x4_t b)
    {
        float32x2x2_t result{vzip_f32(vget_low_f32(a), vget_low_f32(b))};
        return vcombine_f32(result.val[0], result.val[1]);
    }
    static auto unpackhi(float32x4_t a, float32x4_t b)
    {
        float32x2x2_t result{vzip_f32(vget_high_f32(a), vget_high_f32(b))};
        return vcombine_f32(result.val[0], result.val[1]);
    }

    static auto load4(float32_t a, float32_t b, float32_t c, float32_t d)
    {
        float32x4_t ret{vmovq_n_f32(a)};
        ret = vsetq_lane_f32(b, ret, 1);
        ret = vsetq_lane_f32(c, ret, 2);
        ret = vsetq_lane_f32(d, ret, 3);
        return ret;
    }
#endif
};

template<size_t S>
inline void PhaseShifterT<S>::process(al::span<float> dst, const float *RESTRICT src) const
{
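    /* The SIMD paths below produce two output samples per outer iteration:
     * one accumulator gathers even-indexed input samples and the other gathers
     * odd-indexed ones, both multiplied against the same coefficient vector,
     * then a horizontal add reduces each accumulator to one output. Any
     * trailing sample when dst.size() is odd is handled separately. */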
#ifdef HAVE_SSE_INTRINSICS
    if(size_t todo{dst.size()>>1})
    {
        auto *out = reinterpret_cast<__m64*>(dst.data());
        do {
            __m128 r04{_mm_setzero_ps()};
            __m128 r14{_mm_setzero_ps()};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
                const __m128 s0{_mm_loadu_ps(&src[j*2])};
                const __m128 s1{_mm_loadu_ps(&src[j*2 + 4])};

                __m128 s{_mm_shuffle_ps(s0, s1, _MM_SHUFFLE(2, 0, 2, 0))};
                r04 = _mm_add_ps(r04, _mm_mul_ps(s, coeffs));

                s = _mm_shuffle_ps(s0, s1, _MM_SHUFFLE(3, 1, 3, 1));
                r14 = _mm_add_ps(r14, _mm_mul_ps(s, coeffs));
            }
            src += 2;

            __m128 r4{_mm_add_ps(_mm_unpackhi_ps(r04, r14), _mm_unpacklo_ps(r04, r14))};
            r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

            _mm_storel_pi(out, r4);
            ++out;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        __m128 r4{_mm_setzero_ps()};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
            const __m128 s{_mm_setr_ps(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = _mm_add_ps(r4, _mm_mul_ps(s, coeffs));
        }
        r4 = _mm_add_ps(r4, _mm_shuffle_ps(r4, r4, _MM_SHUFFLE(0, 1, 2, 3)));
        r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

        dst.back() = _mm_cvtss_f32(r4);
    }

#elif defined(HAVE_NEON)

    size_t pos{0};
    if(size_t todo{dst.size()>>1})
    {
        do {
            float32x4_t r04{vdupq_n_f32(0.0f)};
            float32x4_t r14{vdupq_n_f32(0.0f)};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
                const float32x4_t s0{vld1q_f32(&src[j*2])};
                const float32x4_t s1{vld1q_f32(&src[j*2 + 4])};

                r04 = vmlaq_f32(r04, shuffle_2020(s0, s1), coeffs);
                r14 = vmlaq_f32(r14, shuffle_3131(s0, s1), coeffs);
            }
            src += 2;

            float32x4_t r4{vaddq_f32(unpackhi(r04, r14), unpacklo(r04, r14))};
            float32x2_t r2{vadd_f32(vget_low_f32(r4), vget_high_f32(r4))};

            vst1_f32(&dst[pos], r2);
            pos += 2;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        float32x4_t r4{vdupq_n_f32(0.0f)};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
            const float32x4_t s{load4(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = vmlaq_f32(r4, s, coeffs);
        }
        r4 = vaddq_f32(r4, vrev64q_f32(r4));
        dst[pos] = vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
    }

#else

    for(float &output : dst)
    {
        float ret{0.0f};
        for(size_t j{0};j < mCoeffs.size();++j)
            ret += src[j*2] * mCoeffs[j];

        output = ret;
        ++src;
    }
#endif
}

template<size_t S>
inline void PhaseShifterT<S>::processAccum(al::span<float> dst, const float *RESTRICT src) const
{
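    /* Same structure as process() above, but each result is added to the
     * existing contents of dst instead of overwriting it. */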
#ifdef HAVE_SSE_INTRINSICS
    if(size_t todo{dst.size()>>1})
    {
        auto *out = reinterpret_cast<__m64*>(dst.data());
        do {
            __m128 r04{_mm_setzero_ps()};
            __m128 r14{_mm_setzero_ps()};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
                const __m128 s0{_mm_loadu_ps(&src[j*2])};
                const __m128 s1{_mm_loadu_ps(&src[j*2 + 4])};

                __m128 s{_mm_shuffle_ps(s0, s1, _MM_SHUFFLE(2, 0, 2, 0))};
                r04 = _mm_add_ps(r04, _mm_mul_ps(s, coeffs));

                s = _mm_shuffle_ps(s0, s1, _MM_SHUFFLE(3, 1, 3, 1));
                r14 = _mm_add_ps(r14, _mm_mul_ps(s, coeffs));
            }
            src += 2;

            __m128 r4{_mm_add_ps(_mm_unpackhi_ps(r04, r14), _mm_unpacklo_ps(r04, r14))};
            r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

            _mm_storel_pi(out, _mm_add_ps(_mm_loadl_pi(_mm_undefined_ps(), out), r4));
            ++out;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        __m128 r4{_mm_setzero_ps()};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
            const __m128 s{_mm_setr_ps(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = _mm_add_ps(r4, _mm_mul_ps(s, coeffs));
        }
        r4 = _mm_add_ps(r4, _mm_shuffle_ps(r4, r4, _MM_SHUFFLE(0, 1, 2, 3)));
        r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

        dst.back() += _mm_cvtss_f32(r4);
    }

#elif defined(HAVE_NEON)

    size_t pos{0};
    if(size_t todo{dst.size()>>1})
    {
        do {
            float32x4_t r04{vdupq_n_f32(0.0f)};
            float32x4_t r14{vdupq_n_f32(0.0f)};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
                const float32x4_t s0{vld1q_f32(&src[j*2])};
                const float32x4_t s1{vld1q_f32(&src[j*2 + 4])};

                r04 = vmlaq_f32(r04, shuffle_2020(s0, s1), coeffs);
                r14 = vmlaq_f32(r14, shuffle_3131(s0, s1), coeffs);
            }
            src += 2;

            float32x4_t r4{vaddq_f32(unpackhi(r04, r14), unpacklo(r04, r14))};
            float32x2_t r2{vadd_f32(vget_low_f32(r4), vget_high_f32(r4))};

            vst1_f32(&dst[pos], vadd_f32(vld1_f32(&dst[pos]), r2));
            pos += 2;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        float32x4_t r4{vdupq_n_f32(0.0f)};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
            const float32x4_t s{load4(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = vmlaq_f32(r4, s, coeffs);
        }
        r4 = vaddq_f32(r4, vrev64q_f32(r4));
        dst[pos] += vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
    }

#else

    for(float &output : dst)
    {
        float ret{0.0f};
        for(size_t j{0};j < mCoeffs.size();++j)
            ret += src[j*2] * mCoeffs[j];

        output += ret;
        ++src;
    }
#endif
}

#endif /* PHASE_SHIFTER_H */