// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Initial input buffer layout, dividing into regions r0_ to r4_ (note: r0_, r3_
// and r4_ will move after the first load):
//
// |----------------|-----------------------------------------|----------------|
//
//                                 request_frames_
//                   <--------------------------------------------------------->
//                              r0_ (during first load)
//
//  kKernelSize / 2   kKernelSize / 2         kKernelSize / 2   kKernelSize / 2
// <---------------> <--------------->       <---------------> <--------------->
//        r1_               r2_                     r3_               r4_
//
//                        block_size_ == r4_ - r2_
//                   <--------------------------------------->
//
//                                           request_frames_
//                             <------------------ ... ----------------->
//                                r0_ (during second load)
//
// On the second request r0_ slides to the right by kKernelSize / 2 and r3_, r4_
// and block_size_ are reinitialized via step (3) in the algorithm below.
//
// These new regions remain constant until a Flush() occurs.  While complicated,
// this allows us to reduce jitter by always requesting the same amount from the
// provided callback.
//
// The algorithm:
//
// 1) Allocate input_buffer of size: request_frames_ + kKernelSize; this ensures
//    there's enough room to read request_frames_ from the callback into region
//    r0_ (which will move between the first and subsequent passes).
//
// 2) Let r1_, r2_ each represent half the kernel centered around r0_:
//
//        r0_ = input_buffer_ + kKernelSize / 2
//        r1_ = input_buffer_
//        r2_ = r0_
//
//    r0_ is always request_frames_ in size.  r1_, r2_ are kKernelSize / 2 in
//    size.  r1_ must be zero initialized to avoid convolution with garbage (see
//    step (5) for why).
//
// 3) Let r3_, r4_ each represent half the kernel right aligned with the end of
//    r0_ and choose block_size_ as the distance in frames between r4_ and r2_:
//
//        r3_ = r0_ + request_frames_ - kKernelSize
//        r4_ = r0_ + request_frames_ - kKernelSize / 2
//        block_size_ = r4_ - r2_ = request_frames_ - kKernelSize / 2
//
// 4) Consume request_frames_ frames into r0_.
//
// 5) Position kernel centered at start of r2_ and generate output frames until
//    the kernel is centered at the start of r4_ or we've finished generating
//    all the output frames.
//
// 6) Wrap leftover data from r3_ to r1_ and r4_ to r2_.
//
// 7) If we're on the second load, in order to avoid overwriting the frames we
//    just wrapped from r4_ we need to slide r0_ to the right by the size of
//    r4_, which is kKernelSize / 2:
//
//        r0_ = r0_ + kKernelSize / 2 = input_buffer_ + kKernelSize
//
//    r3_, r4_, and block_size_ then need to be reinitialized, so goto (3).
//
// 8) Else, if we're not on the second load, goto (4).
//
// Note: we're glossing over how the sub-sample handling works with
// |virtual_source_idx_|, etc.
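//
// For illustration only, a minimal usage sketch -- ProvideInput, output_frames
// and output_buffer are hypothetical caller-side names, and the ReadCB type is
// a base::Callback<void(int frames, float* destination)>:
//
//   // Resample from 48 kHz to 44.1 kHz, reading 512 input frames per request.
//   media::SincResampler resampler(48000.0 / 44100.0, 512,
//                                  base::Bind(&ProvideInput));
//   resampler.Resample(output_frames, output_buffer);
//
// Each Resample() call pulls fixed 512-frame reads through the callback
// whenever the current block is exhausted, per steps (4) and (5) above.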

// MSVC++ requires this to be set before any other includes to get M_PI.
#define _USE_MATH_DEFINES

#include "media/base/sinc_resampler.h"

#include <cmath>
#include <limits>

#include "base/logging.h"

#if defined(ARCH_CPU_X86_FAMILY)
#include <xmmintrin.h>
#define CONVOLVE_FUNC Convolve_SSE
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
#include <arm_neon.h>
#define CONVOLVE_FUNC Convolve_NEON
#else
#define CONVOLVE_FUNC Convolve_C
#endif

namespace media {

static double SincScaleFactor(double io_ratio) {
  // |sinc_scale_factor| is basically the normalized cutoff frequency of the
  // low-pass filter.
  double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0;

  // The sinc function is an idealized brick-wall filter, but since we're
  // windowing it the transition from pass to stop does not happen right away.
  // So we should adjust the low pass filter cutoff slightly downward to avoid
  // some aliasing at the very high-end.
  // TODO(crogers): this value is empirical and to be more exact should vary
  // depending on kKernelSize.
  sinc_scale_factor *= 0.9;

  return sinc_scale_factor;
}
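
// For example: when downsampling with io_ratio == 3.0 (say 48 kHz input to
// 16 kHz output), the scaled cutoff is (1.0 / 3.0) * 0.9 == 0.3 of the input
// Nyquist frequency, i.e. slightly below the output band edge at 1/3.  When
// upsampling (io_ratio <= 1.0) the cutoff stays at 0.9.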

static int CalculateChunkSize(int block_size_, double io_ratio) {
  return block_size_ / io_ratio;
}
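
// For example: CalculateChunkSize(512, 2.0) == 256 -- with an io ratio of 2.0
// each block of 512 input frames yields 256 output frames, since every output
// frame advances the virtual source index by two input frames.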

SincResampler::SincResampler(double io_sample_rate_ratio,
                             int request_frames,
                             const ReadCB& read_cb)
    : io_sample_rate_ratio_(io_sample_rate_ratio),
      read_cb_(read_cb),
      request_frames_(request_frames),
      input_buffer_size_(request_frames_ + kKernelSize),
      // Create input buffers with a 16-byte alignment for SSE optimizations.
      kernel_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
      kernel_pre_sinc_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
      kernel_window_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
      input_buffer_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * input_buffer_size_, 16))),
      r1_(input_buffer_.get()),
      r2_(input_buffer_.get() + kKernelSize / 2) {
  CHECK_GT(request_frames_, 0);
  Flush();
  CHECK_GT(block_size_, kKernelSize)
      << "block_size must be greater than kKernelSize!";

  memset(kernel_storage_.get(), 0,
         sizeof(*kernel_storage_.get()) * kKernelStorageSize);
  memset(kernel_pre_sinc_storage_.get(), 0,
         sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize);
  memset(kernel_window_storage_.get(), 0,
         sizeof(*kernel_window_storage_.get()) * kKernelStorageSize);

  InitializeKernel();
}

SincResampler::~SincResampler() {}

void SincResampler::UpdateRegions(bool second_load) {
  // Set up various region pointers in the buffer (see diagram above).  If
  // we're on the second load we need to slide r0_ to the right by
  // kKernelSize / 2.
  r0_ = input_buffer_.get() + (second_load ? kKernelSize : kKernelSize / 2);
  r3_ = r0_ + request_frames_ - kKernelSize;
  r4_ = r0_ + request_frames_ - kKernelSize / 2;
  block_size_ = r4_ - r2_;
  chunk_size_ = CalculateChunkSize(block_size_, io_sample_rate_ratio_);

  // r1_ at the beginning of the buffer.
  CHECK_EQ(r1_, input_buffer_.get());
  // r1_ left of r2_, r3_ left of r4_, and the half-kernel sizes equal.
  CHECK_EQ(r2_ - r1_, r4_ - r3_);
  // r2_ left of r3_.
  CHECK_LT(r2_, r3_);
}
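
// For concreteness, an illustrative layout (assuming request_frames_ == 512
// and kKernelSize == 32, so input_buffer_ holds 544 frames; r2_ is fixed at
// input_buffer_ + 16):
//
//   First load (second_load == false):
//     r0_ = input_buffer_ + 16, r3_ = input_buffer_ + 496,
//     r4_ = input_buffer_ + 512, block_size_ = 512 - 16 = 496
//
//   Subsequent loads (second_load == true):
//     r0_ = input_buffer_ + 32, r3_ = input_buffer_ + 512,
//     r4_ = input_buffer_ + 528, block_size_ = 528 - 16 = 512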

void SincResampler::InitializeKernel() {
  // Blackman window parameters.
  static const double kAlpha = 0.16;
  static const double kA0 = 0.5 * (1.0 - kAlpha);
  static const double kA1 = 0.5;
  static const double kA2 = 0.5 * kAlpha;
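
  // With kAlpha == 0.16 these are the standard Blackman coefficients
  // kA0 == 0.42, kA1 == 0.5, kA2 == 0.08, i.e. the window computed below is
  //   w(x) = 0.42 - 0.5 * cos(2 * M_PI * x) + 0.08 * cos(4 * M_PI * x)
  // evaluated for x spanning [0, 1) across the kernel.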

  // Generates a set of windowed sinc() kernels.
  // We generate a range of sub-sample offsets from 0.0 to 1.0.
  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    const float subsample_offset =
        static_cast<float>(offset_idx) / kKernelOffsetCount;

    for (int i = 0; i < kKernelSize; ++i) {
      const int idx = i + offset_idx * kKernelSize;
      const float pre_sinc =
          static_cast<float>(M_PI * (i - kKernelSize / 2 - subsample_offset));
      kernel_pre_sinc_storage_[idx] = pre_sinc;

      // Compute Blackman window, matching the offset of the sinc().
      const float x = (i - subsample_offset) / kKernelSize;
      const float window = static_cast<float>(kA0 - kA1 * cos(2.0 * M_PI * x) +
                                              kA2 * cos(4.0 * M_PI * x));
      kernel_window_storage_[idx] = window;

      // Compute the sinc with offset, then window the sinc() function and
      // store at the correct offset.
      kernel_storage_[idx] = static_cast<float>(window *
          ((pre_sinc == 0) ?
              sinc_scale_factor :
              (sin(sinc_scale_factor * pre_sinc) / pre_sinc)));
    }
  }
}

void SincResampler::SetRatio(double io_sample_rate_ratio) {
  if (fabs(io_sample_rate_ratio_ - io_sample_rate_ratio) <
      std::numeric_limits<double>::epsilon()) {
    return;
  }

  io_sample_rate_ratio_ = io_sample_rate_ratio;
  chunk_size_ = CalculateChunkSize(block_size_, io_sample_rate_ratio_);

  // Optimize reinitialization by reusing values which are independent of
  // |sinc_scale_factor|.  Provides a 3x speedup.
  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    for (int i = 0; i < kKernelSize; ++i) {
      const int idx = i + offset_idx * kKernelSize;
      const float window = kernel_window_storage_[idx];
      const float pre_sinc = kernel_pre_sinc_storage_[idx];

      kernel_storage_[idx] = static_cast<float>(window *
          ((pre_sinc == 0) ?
              sinc_scale_factor :
              (sin(sinc_scale_factor * pre_sinc) / pre_sinc)));
    }
  }
}

void SincResampler::Resample(int frames, float* destination) {
  int remaining_frames = frames;

  // Step (1) -- Prime the input buffer at the start of the input stream.
  if (!buffer_primed_ && remaining_frames) {
    read_cb_.Run(request_frames_, r0_);
    buffer_primed_ = true;
  }

  // Step (2) -- Resample!  const what we can outside of the loop for speed.
  // It actually has an impact on ARM performance.  See inner loop comment
  // below.
  const double current_io_ratio = io_sample_rate_ratio_;
  const float* const kernel_ptr = kernel_storage_.get();
  while (remaining_frames) {
    // Note: The loop construct here can severely impact performance on ARM
    // or when built with clang.  See https://codereview.chromium.org/18566009/
    int source_idx = static_cast<int>(virtual_source_idx_);
    while (source_idx < block_size_) {
      // |virtual_source_idx_| lies in between two kernel offsets so figure out
      // what they are.
      const double subsample_remainder = virtual_source_idx_ - source_idx;

      const double virtual_offset_idx =
          subsample_remainder * kKernelOffsetCount;
      const int offset_idx = static_cast<int>(virtual_offset_idx);

      // We'll compute "convolutions" for the two kernels which straddle
      // |virtual_source_idx_|.
      const float* const k1 = kernel_ptr + offset_idx * kKernelSize;
      const float* const k2 = k1 + kKernelSize;

      // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage.  Should always
      // be true so long as kKernelSize is a multiple of 16.
      DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x0F);
      DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x0F);

      // Initialize input pointer based on quantized |virtual_source_idx_|.
      const float* const input_ptr = r1_ + source_idx;

      // Figure out how much to weight each kernel's "convolution".
      const double kernel_interpolation_factor =
          virtual_offset_idx - offset_idx;
      *destination++ = CONVOLVE_FUNC(
          input_ptr, k1, k2, kernel_interpolation_factor);

      // Advance the virtual index.
      virtual_source_idx_ += current_io_ratio;
      source_idx = static_cast<int>(virtual_source_idx_);

      if (!--remaining_frames)
        return;
    }

    // Wrap back around to the start.
    DCHECK_GE(virtual_source_idx_, block_size_);
    virtual_source_idx_ -= block_size_;

    // Step (3) -- Copy r3_, r4_ to r1_, r2_.
    // This wraps the last input frames back to the start of the buffer.
    memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize);

    // Step (4) -- Reinitialize regions if necessary.
    if (r0_ == r2_)
      UpdateRegions(true);

    // Step (5) -- Refresh the buffer with more input.
    read_cb_.Run(request_frames_, r0_);
  }
}
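
// Illustrative accounting, using the example sizes from the comments above
// (request_frames_ == 512, kKernelSize == 32, io ratio 2.0): the first 496
// frame block yields 248 output frames and each steady-state 512 frame block
// yields 256, so Resample(1024, ...) costs the priming read plus four more
// reads through |read_cb_|.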

void SincResampler::PrimeWithSilence() {
  // By enforcing the buffer hasn't been primed, we ensure the input buffer has
  // already been zeroed during construction or by a previous Flush() call.
  DCHECK(!buffer_primed_);
  DCHECK_EQ(input_buffer_[0], 0.0f);
  UpdateRegions(true);
}

void SincResampler::Flush() {
  virtual_source_idx_ = 0;
  buffer_primed_ = false;
  memset(input_buffer_.get(), 0,
         sizeof(*input_buffer_.get()) * input_buffer_size_);
  UpdateRegions(false);
}

double SincResampler::BufferedFrames() const {
  if (buffer_primed_) {
    return request_frames_ - virtual_source_idx_;
  } else {
    return 0.0;
  }
}

float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
                                const float* k2,
                                double kernel_interpolation_factor) {
  float sum1 = 0;
  float sum2 = 0;

  // Generate a single output sample.  Unrolling this loop hurt performance in
  // local testing.
  int n = kKernelSize;
  while (n--) {
    sum1 += *input_ptr * *k1++;
    sum2 += *input_ptr++ * *k2++;
  }

  // Linearly interpolate the two "convolutions".
  return static_cast<float>((1.0 - kernel_interpolation_factor) * sum1 +
                            kernel_interpolation_factor * sum2);
}
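
// For example: in Resample(), virtual_offset_idx == 9.6 yields k1 at kernel
// offset 9, k2 at offset 10, and kernel_interpolation_factor == 0.6, so the
// interpolation above computes 0.4 * sum1 + 0.6 * sum2.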

#if defined(ARCH_CPU_X86_FAMILY)
float SincResampler::Convolve_SSE(const float* input_ptr, const float* k1,
                                  const float* k2,
                                  double kernel_interpolation_factor) {
  __m128 m_input;
  __m128 m_sums1 = _mm_setzero_ps();
  __m128 m_sums2 = _mm_setzero_ps();

  // Based on |input_ptr| alignment, we need to use loadu or load.  Unrolling
  // these loops hurt performance in local testing.
  if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
    for (int i = 0; i < kKernelSize; i += 4) {
      m_input = _mm_loadu_ps(input_ptr + i);
      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
    }
  } else {
    for (int i = 0; i < kKernelSize; i += 4) {
      m_input = _mm_load_ps(input_ptr + i);
      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
    }
  }

  // Linearly interpolate the two "convolutions".
  m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(
      static_cast<float>(1.0 - kernel_interpolation_factor)));
  m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(
      static_cast<float>(kernel_interpolation_factor)));
  m_sums1 = _mm_add_ps(m_sums1, m_sums2);

  // Sum components together.
  float result;
  m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
  _mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps(
      m_sums2, m_sums2, 1)));

  return result;
}
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
float SincResampler::Convolve_NEON(const float* input_ptr, const float* k1,
                                   const float* k2,
                                   double kernel_interpolation_factor) {
  float32x4_t m_input;
  float32x4_t m_sums1 = vmovq_n_f32(0);
  float32x4_t m_sums2 = vmovq_n_f32(0);

  const float* upper = input_ptr + kKernelSize;
  for (; input_ptr < upper; ) {
    m_input = vld1q_f32(input_ptr);
    input_ptr += 4;
    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
    k1 += 4;
    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
    k2 += 4;
  }

  // Linearly interpolate the two "convolutions".
  m_sums1 = vmlaq_f32(
      vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
      m_sums2, vmovq_n_f32(kernel_interpolation_factor));

  // Sum components together.
  float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1));
  return vget_lane_f32(vpadd_f32(m_half, m_half), 0);
}
#endif

}  // namespace media