media/base/vector_math.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/base/vector_math.h"
#include "media/base/vector_math_testing.h"

#include <algorithm>

#include "base/logging.h"
#include "build/build_config.h"

// NaCl does not allow intrinsics.
#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
#include <xmmintrin.h>
// Don't use custom SSE versions where the auto-vectorized C version performs
// better, which is anywhere clang is used.
#if !defined(__clang__)
#define FMAC_FUNC FMAC_SSE
#define FMUL_FUNC FMUL_SSE
#else
#define FMAC_FUNC FMAC_C
#define FMUL_FUNC FMUL_C
#endif
#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_SSE
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
#include <arm_neon.h>
#define FMAC_FUNC FMAC_NEON
#define FMUL_FUNC FMUL_NEON
#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_NEON
#else
#define FMAC_FUNC FMAC_C
#define FMUL_FUNC FMUL_C
#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_C
#endif

namespace media {
namespace vector_math {

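// Illustrative usage (buffer names are hypothetical): FMAC(), FMUL() and
// EWMAAndMaxPower() require 16-byte (kRequiredAlignment) aligned buffers,
// e.g. AudioBus channel data or memory from base::AlignedAlloc():
//
//   // Scale: output[i] = input[i] * volume.
//   vector_math::FMUL(input, volume, frames, output);
//   // Accumulate: output[i] += input[i] * volume.
//   vector_math::FMAC(input, volume, frames, output);
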
void FMAC(const float src[], float scale, int len, float dest[]) {
  // Ensure |src| and |dest| are 16-byte aligned.
  DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(src) & (kRequiredAlignment - 1));
  DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(dest) & (kRequiredAlignment - 1));
  return FMAC_FUNC(src, scale, len, dest);
}

void FMAC_C(const float src[], float scale, int len, float dest[]) {
  for (int i = 0; i < len; ++i)
    dest[i] += src[i] * scale;
}

void FMUL(const float src[], float scale, int len, float dest[]) {
  // Ensure |src| and |dest| are 16-byte aligned.
  DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(src) & (kRequiredAlignment - 1));
  DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(dest) & (kRequiredAlignment - 1));
  return FMUL_FUNC(src, scale, len, dest);
}

void FMUL_C(const float src[], float scale, int len, float dest[]) {
  for (int i = 0; i < len; ++i)
    dest[i] = src[i] * scale;
}

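// Fades linearly from |src| to |dest| across |len| samples, writing the
// result into |dest|: the first output sample is entirely |src| and the mix
// shifts toward |dest| by the end of the buffer.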
void Crossfade(const float src[], int len, float dest[]) {
  float cf_ratio = 0;
  const float cf_increment = 1.0f / len;
  for (int i = 0; i < len; ++i, cf_ratio += cf_increment)
    dest[i] = (1.0f - cf_ratio) * src[i] + cf_ratio * dest[i];
}

std::pair<float, float> EWMAAndMaxPower(
    float initial_value, const float src[], int len, float smoothing_factor) {
  // Ensure |src| is 16-byte aligned.
  DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(src) & (kRequiredAlignment - 1));
  return EWMAAndMaxPower_FUNC(initial_value, src, len, smoothing_factor);
}

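// Reference C implementation.  The returned pair holds the exponentially
// weighted moving average of the squared samples in |src| (seeded with
// |initial_value|) and the maximum squared sample seen.  Illustrative call,
// with hypothetical names:
//
//   std::pair<float, float> power = vector_math::EWMAAndMaxPower(
//       last_ewma_power, aligned_samples, frame_count, smoothing_factor);
//   // power.first is the smoothed power, power.second the peak power.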
std::pair<float, float> EWMAAndMaxPower_C(
    float initial_value, const float src[], int len, float smoothing_factor) {
  std::pair<float, float> result(initial_value, 0.0f);
  const float weight_prev = 1.0f - smoothing_factor;
  for (int i = 0; i < len; ++i) {
    result.first *= weight_prev;
    const float sample = src[i];
    const float sample_squared = sample * sample;
    result.first += sample_squared * smoothing_factor;
    result.second = std::max(result.second, sample_squared);
  }
  return result;
}

#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
void FMUL_SSE(const float src[], float scale, int len, float dest[]) {
  const int rem = len % 4;
  const int last_index = len - rem;
  __m128 m_scale = _mm_set_ps1(scale);
  for (int i = 0; i < last_index; i += 4)
    _mm_store_ps(dest + i, _mm_mul_ps(_mm_load_ps(src + i), m_scale));

  // Handle any remaining values that wouldn't fit in an SSE pass.
  for (int i = last_index; i < len; ++i)
    dest[i] = src[i] * scale;
}

void FMAC_SSE(const float src[], float scale, int len, float dest[]) {
  const int rem = len % 4;
  const int last_index = len - rem;
  __m128 m_scale = _mm_set_ps1(scale);
  for (int i = 0; i < last_index; i += 4) {
    _mm_store_ps(dest + i, _mm_add_ps(_mm_load_ps(dest + i),
                 _mm_mul_ps(_mm_load_ps(src + i), m_scale)));
  }

  // Handle any remaining values that wouldn't fit in an SSE pass.
  for (int i = last_index; i < len; ++i)
    dest[i] += src[i] * scale;
}

// Convenience macro to extract float 0 through 3 from the vector |a|. This is
// needed because compilers other than clang don't support access via
// operator[]().
#define EXTRACT_FLOAT(a, i) \
  (i == 0 ? \
       _mm_cvtss_f32(a) : \
       _mm_cvtss_f32(_mm_shuffle_ps(a, a, i)))
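// For i > 0, the shuffle moves lane |i| of |a| into lane 0, where
// _mm_cvtss_f32() can read it; for i == 0 the low lane is read directly.
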
std::pair<float, float> EWMAAndMaxPower_SSE(
    float initial_value, const float src[], int len, float smoothing_factor) {
  // When the recurrence is unrolled, we see that we can split it into 4
  // separate lanes of evaluation:
  //
  // y[n] = a(S[n]^2) + (1-a)(y[n-1])
  //      = a(S[n]^2) + (1-a)^1(aS[n-1]^2) + (1-a)^2(aS[n-2]^2) + ...
  //      = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
  //
  // where z[n] = a(S[n]^2) + (1-a)^4(z[n-4]) + (1-a)^8(z[n-8]) + ...
  //
  // Thus, the strategy here is to compute z[n], z[n-1], z[n-2], and z[n-3] in
  // each of the 4 lanes, and then combine them to give y[n].
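  //
  // For example, after two vector passes (8 samples, a = smoothing_factor and
  // y[-1] = initial_value), lane 3 holds
  //   (1-a)^8*y[-1] + (1-a)^4*a*S[3]^2 + a*S[7]^2
  // and lane 2 holds (1-a)^4*a*S[2]^2 + a*S[6]^2.  Weighting lanes 2, 1 and 0
  // by (1-a), (1-a)^2 and (1-a)^3 and summing reproduces y[7] from the
  // original recurrence.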

  const int rem = len % 4;
  const int last_index = len - rem;

  const __m128 smoothing_factor_x4 = _mm_set_ps1(smoothing_factor);
  const float weight_prev = 1.0f - smoothing_factor;
  const __m128 weight_prev_x4 = _mm_set_ps1(weight_prev);
  const __m128 weight_prev_squared_x4 =
      _mm_mul_ps(weight_prev_x4, weight_prev_x4);
  const __m128 weight_prev_4th_x4 =
      _mm_mul_ps(weight_prev_squared_x4, weight_prev_squared_x4);

  // Compute z[n], z[n-1], z[n-2], and z[n-3] in parallel in lanes 3, 2, 1 and
  // 0, respectively.
  __m128 max_x4 = _mm_setzero_ps();
  __m128 ewma_x4 = _mm_setr_ps(0.0f, 0.0f, 0.0f, initial_value);
  int i;
  for (i = 0; i < last_index; i += 4) {
    ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_4th_x4);
    const __m128 sample_x4 = _mm_load_ps(src + i);
    const __m128 sample_squared_x4 = _mm_mul_ps(sample_x4, sample_x4);
    max_x4 = _mm_max_ps(max_x4, sample_squared_x4);
    // Note: The compiler optimizes this to a single multiply-and-accumulate
    // instruction:
    ewma_x4 = _mm_add_ps(ewma_x4,
                         _mm_mul_ps(sample_squared_x4, smoothing_factor_x4));
  }

  // y[n] = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
  float ewma = EXTRACT_FLOAT(ewma_x4, 3);
  ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
  ewma += EXTRACT_FLOAT(ewma_x4, 2);
  ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
  ewma += EXTRACT_FLOAT(ewma_x4, 1);
  ewma_x4 = _mm_mul_ss(ewma_x4, weight_prev_x4);
  ewma += EXTRACT_FLOAT(ewma_x4, 0);

  // Fold the maximums together to get the overall maximum.
  max_x4 = _mm_max_ps(max_x4,
                      _mm_shuffle_ps(max_x4, max_x4, _MM_SHUFFLE(3, 3, 1, 1)));
  max_x4 = _mm_max_ss(max_x4, _mm_shuffle_ps(max_x4, max_x4, 2));
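  // The shuffle/_mm_max_ps() pair above leaves max(lane 0, lane 1) in lane 0
  // and max(lane 2, lane 3) in lane 2; the final _mm_max_ss() combines those,
  // so lane 0 now holds the maximum of all four lanes.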

  std::pair<float, float> result(ewma, EXTRACT_FLOAT(max_x4, 0));

  // Handle remaining values at the end of |src|.
  for (; i < len; ++i) {
    result.first *= weight_prev;
    const float sample = src[i];
    const float sample_squared = sample * sample;
    result.first += sample_squared * smoothing_factor;
    result.second = std::max(result.second, sample_squared);
  }

  return result;
}
#endif

#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
void FMAC_NEON(const float src[], float scale, int len, float dest[]) {
  const int rem = len % 4;
  const int last_index = len - rem;
  float32x4_t m_scale = vmovq_n_f32(scale);
  for (int i = 0; i < last_index; i += 4) {
    vst1q_f32(dest + i, vmlaq_f32(
        vld1q_f32(dest + i), vld1q_f32(src + i), m_scale));
  }

  // Handle any remaining values that wouldn't fit in a NEON pass.
  for (int i = last_index; i < len; ++i)
    dest[i] += src[i] * scale;
}

void FMUL_NEON(const float src[], float scale, int len, float dest[]) {
  const int rem = len % 4;
  const int last_index = len - rem;
  float32x4_t m_scale = vmovq_n_f32(scale);
  for (int i = 0; i < last_index; i += 4)
    vst1q_f32(dest + i, vmulq_f32(vld1q_f32(src + i), m_scale));

  // Handle any remaining values that wouldn't fit in a NEON pass.
  for (int i = last_index; i < len; ++i)
    dest[i] = src[i] * scale;
}

std::pair<float, float> EWMAAndMaxPower_NEON(
    float initial_value, const float src[], int len, float smoothing_factor) {
  // When the recurrence is unrolled, we see that we can split it into 4
  // separate lanes of evaluation:
  //
  // y[n] = a(S[n]^2) + (1-a)(y[n-1])
  //      = a(S[n]^2) + (1-a)^1(aS[n-1]^2) + (1-a)^2(aS[n-2]^2) + ...
  //      = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
  //
  // where z[n] = a(S[n]^2) + (1-a)^4(z[n-4]) + (1-a)^8(z[n-8]) + ...
  //
  // Thus, the strategy here is to compute z[n], z[n-1], z[n-2], and z[n-3] in
  // each of the 4 lanes, and then combine them to give y[n].

  const int rem = len % 4;
  const int last_index = len - rem;

  const float32x4_t smoothing_factor_x4 = vdupq_n_f32(smoothing_factor);
  const float weight_prev = 1.0f - smoothing_factor;
  const float32x4_t weight_prev_x4 = vdupq_n_f32(weight_prev);
  const float32x4_t weight_prev_squared_x4 =
      vmulq_f32(weight_prev_x4, weight_prev_x4);
  const float32x4_t weight_prev_4th_x4 =
      vmulq_f32(weight_prev_squared_x4, weight_prev_squared_x4);

  // Compute z[n], z[n-1], z[n-2], and z[n-3] in parallel in lanes 3, 2, 1 and
  // 0, respectively.
  float32x4_t max_x4 = vdupq_n_f32(0.0f);
  float32x4_t ewma_x4 = vsetq_lane_f32(initial_value, vdupq_n_f32(0.0f), 3);
  int i;
  for (i = 0; i < last_index; i += 4) {
    ewma_x4 = vmulq_f32(ewma_x4, weight_prev_4th_x4);
    const float32x4_t sample_x4 = vld1q_f32(src + i);
    const float32x4_t sample_squared_x4 = vmulq_f32(sample_x4, sample_x4);
    max_x4 = vmaxq_f32(max_x4, sample_squared_x4);
    // vmlaq_f32() is a multiply-accumulate:
    // |ewma_x4| + |sample_squared_x4| * |smoothing_factor_x4|.
    ewma_x4 = vmlaq_f32(ewma_x4, sample_squared_x4, smoothing_factor_x4);
  }

  // y[n] = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
  float ewma = vgetq_lane_f32(ewma_x4, 3);
  ewma_x4 = vmulq_f32(ewma_x4, weight_prev_x4);
  ewma += vgetq_lane_f32(ewma_x4, 2);
  ewma_x4 = vmulq_f32(ewma_x4, weight_prev_x4);
  ewma += vgetq_lane_f32(ewma_x4, 1);
  ewma_x4 = vmulq_f32(ewma_x4, weight_prev_x4);
  ewma += vgetq_lane_f32(ewma_x4, 0);

  // Fold the maximums together to get the overall maximum.
  float32x2_t max_x2 = vpmax_f32(vget_low_f32(max_x4), vget_high_f32(max_x4));
  max_x2 = vpmax_f32(max_x2, max_x2);
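  // vpmax_f32() takes pairwise maxima, so the first pass reduces the four
  // lanes to {max(lane 0, lane 1), max(lane 2, lane 3)} and the second pass
  // leaves the overall maximum in lane 0 of |max_x2|.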

  std::pair<float, float> result(ewma, vget_lane_f32(max_x2, 0));

  // Handle remaining values at the end of |src|.
  for (; i < len; ++i) {
    result.first *= weight_prev;
    const float sample = src[i];
    const float sample_squared = sample * sample;
    result.first += sample_squared * smoothing_factor;
    result.second = std::max(result.second, sample_squared);
  }

  return result;
}
#endif

}  // namespace vector_math
}  // namespace media