// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of some decoding functions (idct, loop filtering).
//
// Author: somnath@google.com (Somnath Banerjee)
//         cduvivier@google.com (Christian Duvivier)

#include "./dsp.h"

#if defined(WEBP_USE_SSE2)

// The 3-coeff sparse transform in SSE2 is not really faster than the plain-C
// one it seems => disable it by default. Uncomment the following to enable:
// #define USE_TRANSFORM_AC3

#include <emmintrin.h>
#include "../dec/vp8i.h"

//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 =  20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k) >> 16) + x
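  //   As a quick illustrative check of the identity, with x = 100 and K1:
  //   (100 * 85627) >> 16 = 130, and via the trick,
  //   ((100 * 20091) >> 16) + 100 = 30 + 100 = 130 as well.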
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two transforms
  // in parallel). In the case of only one transform, the second half of the
  // vectors will just contain random values we'll never use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((__m128i*)&in[0]);
    in1 = _mm_loadl_epi64((__m128i*)&in[4]);
    in2 = _mm_loadl_epi64((__m128i*)&in[8]);
    in3 = _mm_loadl_epi64((__m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }

  {
    // Vertical pass and subsequent transpose.

    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  {
    // Horizontal pass and subsequent transpose.

    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a = _mm_add_epi16(dc, T2);
    const __m128i b = _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Add inverse transform to 'dst' and store.
  {
    const __m128i zero = _mm_setzero_si128();
    // Load the reference(s).
    __m128i dst0, dst1, dst2, dst3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      dst0 = _mm_loadl_epi64((__m128i*)(dst + 0 * BPS));
      dst1 = _mm_loadl_epi64((__m128i*)(dst + 1 * BPS));
      dst2 = _mm_loadl_epi64((__m128i*)(dst + 2 * BPS));
      dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS));
    } else {
      // Load four bytes/pixels per line.
      dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
      dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
      dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
      dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
    }
    // Convert to 16b.
    dst0 = _mm_unpacklo_epi8(dst0, zero);
    dst1 = _mm_unpacklo_epi8(dst1, zero);
    dst2 = _mm_unpacklo_epi8(dst2, zero);
    dst3 = _mm_unpacklo_epi8(dst3, zero);
    // Add the inverse transform(s).
    dst0 = _mm_add_epi16(dst0, T0);
    dst1 = _mm_add_epi16(dst1, T1);
    dst2 = _mm_add_epi16(dst2, T2);
    dst3 = _mm_add_epi16(dst3, T3);
    // Unsigned saturate to 8b.
    dst0 = _mm_packus_epi16(dst0, dst0);
    dst1 = _mm_packus_epi16(dst1, dst1);
    dst2 = _mm_packus_epi16(dst2, dst2);
    dst3 = _mm_packus_epi16(dst3, dst3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)(dst + 0 * BPS), dst0);
      _mm_storel_epi64((__m128i*)(dst + 1 * BPS), dst1);
      _mm_storel_epi64((__m128i*)(dst + 2 * BPS), dst2);
      _mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3);
    } else {
      // Store four bytes/pixels per line.
      *(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
      *(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
      *(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
      *(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
    }
  }
}

#if defined(USE_TRANSFORM_AC3)
#define MUL(a, b) (((a) * (b)) >> 16)
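// Illustrative example of the fixed-point product: MUL(8, 35468) =
// (8 * 35468) >> 16 = 4, and MUL(8, 20091 + (1 << 16)) = (8 * 85627) >> 16 = 10.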
static void TransformAC3SSE2(const int16_t* in, uint8_t* dst) {
  static const int kC1 = 20091 + (1 << 16);
  static const int kC2 = 35468;
  const __m128i A = _mm_set1_epi16(in[0] + 4);
  const __m128i c4 = _mm_set1_epi16(MUL(in[4], kC2));
  const __m128i d4 = _mm_set1_epi16(MUL(in[4], kC1));
  const int c1 = MUL(in[1], kC2);
  const int d1 = MUL(in[1], kC1);
  const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1);
  const __m128i B = _mm_adds_epi16(A, CD);
  const __m128i m0 = _mm_adds_epi16(B, d4);
  const __m128i m1 = _mm_adds_epi16(B, c4);
  const __m128i m2 = _mm_subs_epi16(B, c4);
  const __m128i m3 = _mm_subs_epi16(B, d4);
  const __m128i zero = _mm_setzero_si128();
  // Load the source pixels.
  __m128i dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
  __m128i dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
  __m128i dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
  __m128i dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
  // Convert to 16b.
  dst0 = _mm_unpacklo_epi8(dst0, zero);
  dst1 = _mm_unpacklo_epi8(dst1, zero);
  dst2 = _mm_unpacklo_epi8(dst2, zero);
  dst3 = _mm_unpacklo_epi8(dst3, zero);
  // Add the inverse transform.
  dst0 = _mm_adds_epi16(dst0, _mm_srai_epi16(m0, 3));
  dst1 = _mm_adds_epi16(dst1, _mm_srai_epi16(m1, 3));
  dst2 = _mm_adds_epi16(dst2, _mm_srai_epi16(m2, 3));
  dst3 = _mm_adds_epi16(dst3, _mm_srai_epi16(m3, 3));
  // Unsigned saturate to 8b.
  dst0 = _mm_packus_epi16(dst0, dst0);
  dst1 = _mm_packus_epi16(dst1, dst1);
  dst2 = _mm_packus_epi16(dst2, dst2);
  dst3 = _mm_packus_epi16(dst3, dst3);
  // Store the results.
  *(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
  *(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
  *(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
  *(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
}
#undef MUL
#endif  // USE_TRANSFORM_AC3

//------------------------------------------------------------------------------
// Loop Filter (Paragraph 15)

// Compute abs(p - q) = subs(p - q) OR subs(q - p)
#define MM_ABS(p, q)  _mm_or_si128(          \
    _mm_subs_epu8((q), (p)),                 \
    _mm_subs_epu8((p), (q)))
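// (For example, for bytes p = 3 and q = 10: _mm_subs_epu8 gives 0 and 7 --
// the negative difference saturates to zero -- and their OR is 7 == abs(3 - 10).)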

// Shift each byte of "a" by N bits while preserving the sign bit.
// It first shifts the lower bytes of the words and then the upper bytes and
// then merges the results together.
#define SIGNED_SHIFT_N(a, N) {               \
  __m128i t = a;                             \
  /* shift the lower bytes of each word */   \
  t = _mm_slli_epi16(t, 8);                  \
  t = _mm_srai_epi16(t, N);                  \
  t = _mm_srli_epi16(t, 8);                  \
  /* shift the upper bytes of each word */   \
  a = _mm_srai_epi16(a, N + 8);              \
  a = _mm_slli_epi16(a, 8);                  \
  /* merge the two halves */                 \
  a = _mm_or_si128(t, a);                    \
}
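// (Worked example on one 16-bit lane 0x12F0 with N = 3: the lower-byte path
// gives 0x00FE (0xF0, i.e. -16, arithmetically shifted to -2) and the
// upper-byte path gives 0x0200 (0x12 >> 3 == 0x02); OR'ed together: 0x02FE.)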

#define FLIP_SIGN_BIT2(a, b) {               \
  a = _mm_xor_si128(a, sign_bit);            \
  b = _mm_xor_si128(b, sign_bit);            \
}

#define FLIP_SIGN_BIT4(a, b, c, d) {         \
  FLIP_SIGN_BIT2(a, b);                      \
  FLIP_SIGN_BIT2(c, d);                      \
}

#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) {                      \
  const __m128i zero = _mm_setzero_si128();                                    \
  const __m128i t_1 = MM_ABS(p1, p0);                                          \
  const __m128i t_2 = MM_ABS(q1, q0);                                          \
  const __m128i h = _mm_set1_epi8(hev_thresh);                                 \
  const __m128i t_3 = _mm_subs_epu8(t_1, h);  /* abs(p1 - p0) - hev_thresh */  \
  const __m128i t_4 = _mm_subs_epu8(t_2, h);  /* abs(q1 - q0) - hev_thresh */  \
  not_hev = _mm_or_si128(t_3, t_4);                                            \
  not_hev = _mm_cmpeq_epi8(not_hev, zero);  /* 0xFF when both diffs <= hev_thresh */ \
}

#define GET_BASE_DELTA(p1, p0, q0, q1, o) {                     \
  const __m128i qp0 = _mm_subs_epi8(q0, p0);  /* q0 - p0 */     \
  o = _mm_subs_epi8(p1, q1);      /* p1 - q1 */                 \
  o = _mm_adds_epi8(o, qp0);      /* p1 - q1 + 1 * (q0 - p0) */ \
  o = _mm_adds_epi8(o, qp0);      /* p1 - q1 + 2 * (q0 - p0) */ \
  o = _mm_adds_epi8(o, qp0);      /* p1 - q1 + 3 * (q0 - p0) */ \
}
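// This builds the usual VP8 filter amount a ~= clamp(p1 - q1 + 3 * (q0 - p0)),
// with the signed-saturating adds clamping after each step.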

#define DO_SIMPLE_FILTER(p0, q0, fl) {       \
  const __m128i three = _mm_set1_epi8(3);    \
  const __m128i four = _mm_set1_epi8(4);     \
  __m128i v3 = _mm_adds_epi8(fl, three);     \
  __m128i v4 = _mm_adds_epi8(fl, four);      \
  /* Do +4 side */                           \
  SIGNED_SHIFT_N(v4, 3);       /* v4 >> 3 */ \
  q0 = _mm_subs_epi8(q0, v4);  /* q0 -= v4 */\
  /* Now do +3 side */                       \
  SIGNED_SHIFT_N(v3, 3);       /* v3 >> 3 */ \
  p0 = _mm_adds_epi8(p0, v3);  /* p0 += v3 */\
}
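// A minimal scalar sketch of the same update, ignoring the per-step
// saturation that the SIMD version gets from _mm_adds_epi8/_mm_subs_epi8
// (illustrative only, compiled out):
#if 0
static void DoSimpleFilterScalar(int* const p0, int* const q0, const int fl) {
  *q0 -= (fl + 4) >> 3;   // "+4 side"
  *p0 += (fl + 3) >> 3;   // "+3 side"
}
#endif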

// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) {           \
  const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7);       \
  const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7);       \
  const __m128i delta = _mm_packs_epi16(a_lo7, a_hi7); \
  pi = _mm_adds_epi8(pi, delta);                       \
  qi = _mm_subs_epi8(qi, delta);                       \
}
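// (The a_lo/a_hi inputs are the 16-bit amounts built in DoFilter6 below, so
// e.g. for the outermost pair (p2/q2), delta = (9 * f + 63) >> 7.)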

static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
                        const __m128i* q1, int thresh, __m128i* mask) {
  __m128i t1 = MM_ABS(*p1, *q1);        // abs(p1 - q1)
  *mask = _mm_set1_epi8(0xFE);
  t1 = _mm_and_si128(t1, *mask);        // set lsb of each byte to zero
  t1 = _mm_srli_epi16(t1, 1);           // abs(p1 - q1) / 2

  *mask = MM_ABS(*p0, *q0);             // abs(p0 - q0)
  *mask = _mm_adds_epu8(*mask, *mask);  // abs(p0 - q0) * 2
  *mask = _mm_adds_epu8(*mask, t1);     // abs(p0 - q0) * 2 + abs(p1 - q1) / 2

  t1 = _mm_set1_epi8(thresh);
  *mask = _mm_subs_epu8(*mask, t1);     // mask <= thresh
  *mask = _mm_cmpeq_epi8(*mask, _mm_setzero_si128());
}
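// (Per byte, the resulting mask is 0xFF exactly when
// abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= thresh. For example, p0 = 10,
// q0 = 14, p1 = 8, q1 = 9 and thresh = 9 gives 8 + 0 <= 9: filtering needed.)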

//------------------------------------------------------------------------------
// Edge filtering functions

// Applies filter on 2 pixels (p0 and q0)
static WEBP_INLINE void DoFilter2(const __m128i* p1, __m128i* p0, __m128i* q0,
                                  const __m128i* q1, int thresh) {
  __m128i a, mask;
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  const __m128i p1s = _mm_xor_si128(*p1, sign_bit);
  const __m128i q1s = _mm_xor_si128(*q1, sign_bit);

  NeedsFilter(p1, p0, q0, q1, thresh, &mask);

  // convert to signed values
  FLIP_SIGN_BIT2(*p0, *q0);

  GET_BASE_DELTA(p1s, *p0, *q0, q1s, a);
  a = _mm_and_si128(a, mask);     // mask filter values we don't care about
  DO_SIMPLE_FILTER(*p0, *q0, a);

  // unoffset
  FLIP_SIGN_BIT2(*p0, *q0);
}

// Applies filter on 4 pixels (p1, p0, q0 and q1)
static WEBP_INLINE void DoFilter4(__m128i* p1, __m128i* p0,
                                  __m128i* q0, __m128i* q1,
                                  const __m128i* mask, int hev_thresh) {
  __m128i not_hev;
  __m128i t1, t2, t3;
  const __m128i sign_bit = _mm_set1_epi8(0x80);

  // compute hev mask
  GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);

  t1 = _mm_subs_epi8(*p1, *q1);        // p1 - q1
  t1 = _mm_andnot_si128(not_hev, t1);  // hev(p1 - q1)
  t2 = _mm_subs_epi8(*q0, *p0);        // q0 - p0
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 1 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 2 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 3 * (q0 - p0)
  t1 = _mm_and_si128(t1, *mask);       // mask filter values we don't care about

  // Do +4 side
  t2 = _mm_set1_epi8(4);
  t2 = _mm_adds_epi8(t1, t2);        // 3 * (q0 - p0) + (p1 - q1) + 4
  SIGNED_SHIFT_N(t2, 3);             // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
  t3 = t2;                           // save t2 for the p1/q1 update below
  *q0 = _mm_subs_epi8(*q0, t2);      // q0 -= t2

  // Now do +3 side
  t2 = _mm_set1_epi8(3);
  t2 = _mm_adds_epi8(t1, t2);        // +3 instead of +4
  SIGNED_SHIFT_N(t2, 3);             // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
  *p0 = _mm_adds_epi8(*p0, t2);      // p0 += t2

  t2 = _mm_set1_epi8(1);
  t3 = _mm_adds_epi8(t3, t2);        // +1
  SIGNED_SHIFT_N(t3, 1);             // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 4

  t3 = _mm_and_si128(not_hev, t3);   // if !hev
  *q1 = _mm_subs_epi8(*q1, t3);      // q1 -= t3
  *p1 = _mm_adds_epi8(*p1, t3);      // p1 += t3

  // unoffset
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
}

// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
static WEBP_INLINE void DoFilter6(__m128i* p2, __m128i* p1, __m128i* p0,
                                  __m128i* q0, __m128i* q1, __m128i* q2,
                                  const __m128i* mask, int hev_thresh) {
  __m128i a, not_hev;
  const __m128i sign_bit = _mm_set1_epi8(0x80);

  // compute hev mask
  GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);

  GET_BASE_DELTA(*p1, *p0, *q0, *q1, a);

  { // do simple filter on pixels with hev
    const __m128i m = _mm_andnot_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    DO_SIMPLE_FILTER(*p0, *q0, f);
  }
  { // do strong filter on pixels with not hev
    const __m128i zero = _mm_setzero_si128();
    const __m128i nine = _mm_set1_epi16(0x0900);    // 9 << 8
    const __m128i sixty_three = _mm_set1_epi16(63);

    const __m128i m = _mm_and_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    const __m128i f_lo = _mm_unpacklo_epi8(zero, f);
    const __m128i f_hi = _mm_unpackhi_epi8(zero, f);

    const __m128i f9_lo = _mm_mulhi_epi16(f_lo, nine);   // Filter (lo) * 9
    const __m128i f9_hi = _mm_mulhi_epi16(f_hi, nine);   // Filter (hi) * 9
    const __m128i f18_lo = _mm_add_epi16(f9_lo, f9_lo);  // Filter (lo) * 18
    const __m128i f18_hi = _mm_add_epi16(f9_hi, f9_hi);  // Filter (hi) * 18

    const __m128i a2_lo = _mm_add_epi16(f9_lo, sixty_three);   // Filter * 9 + 63
    const __m128i a2_hi = _mm_add_epi16(f9_hi, sixty_three);   // Filter * 9 + 63

    const __m128i a1_lo = _mm_add_epi16(f18_lo, sixty_three);  // F... * 18 + 63
    const __m128i a1_hi = _mm_add_epi16(f18_hi, sixty_three);  // F... * 18 + 63

    const __m128i a0_lo = _mm_add_epi16(f18_lo, a2_lo);  // Filter * 27 + 63
    const __m128i a0_hi = _mm_add_epi16(f18_hi, a2_hi);  // Filter * 27 + 63

    UPDATE_2PIXELS(*p2, *q2, a2_lo, a2_hi);
    UPDATE_2PIXELS(*p1, *q1, a1_lo, a1_hi);
    UPDATE_2PIXELS(*p0, *q0, a0_lo, a0_hi);
  }

  // unoffset
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);
}

// Reads 8 rows across a vertical edge.
//
// TODO(somnath): Investigate _mm_shuffle*; also see if it can be broken into
// two Load4x4() to avoid code duplication.
static WEBP_INLINE void Load8x4(const uint8_t* b, int stride,
                                __m128i* p, __m128i* q) {
  __m128i t1, t2;

  // Load 0th, 1st, 4th and 5th rows
  __m128i r0 = _mm_cvtsi32_si128(*((int*)&b[0 * stride]));  // 03 02 01 00
  __m128i r1 = _mm_cvtsi32_si128(*((int*)&b[1 * stride]));  // 13 12 11 10
  __m128i r4 = _mm_cvtsi32_si128(*((int*)&b[4 * stride]));  // 43 42 41 40
  __m128i r5 = _mm_cvtsi32_si128(*((int*)&b[5 * stride]));  // 53 52 51 50

  r0 = _mm_unpacklo_epi32(r0, r4);  // 43 42 41 40 03 02 01 00
  r1 = _mm_unpacklo_epi32(r1, r5);  // 53 52 51 50 13 12 11 10

  // t1 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
  t1 = _mm_unpacklo_epi8(r0, r1);

  // Load 2nd, 3rd, 6th and 7th rows
  r0 = _mm_cvtsi32_si128(*((int*)&b[2 * stride]));  // 23 22 21 20
  r1 = _mm_cvtsi32_si128(*((int*)&b[3 * stride]));  // 33 32 31 30
  r4 = _mm_cvtsi32_si128(*((int*)&b[6 * stride]));  // 63 62 61 60
  r5 = _mm_cvtsi32_si128(*((int*)&b[7 * stride]));  // 73 72 71 70

  r0 = _mm_unpacklo_epi32(r0, r4);  // 63 62 61 60 23 22 21 20
  r1 = _mm_unpacklo_epi32(r1, r5);  // 73 72 71 70 33 32 31 30

  // t2 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
  t2 = _mm_unpacklo_epi8(r0, r1);

  // t1 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
  // t2 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
  r0 = t1;   // save t1 before it is overwritten
  t1 = _mm_unpacklo_epi16(t1, t2);
  t2 = _mm_unpackhi_epi16(r0, t2);

  // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  *p = _mm_unpacklo_epi32(t1, t2);
  *q = _mm_unpackhi_epi32(t1, t2);
}

static WEBP_INLINE void Load16x4(const uint8_t* r0, const uint8_t* r8,
                                 int stride,
                                 __m128i* p1, __m128i* p0,
                                 __m128i* q0, __m128i* q1) {
  __m128i t1, t2;

  // Assume the pixels around the edge (|) are numbered as follows:
  //                00 01 | 02 03
  //                10 11 | 12 13
  //                 ...  |  ...
  //                e0 e1 | e2 e3
  //                f0 f1 | f2 f3
  //
  // r0 is pointing to the 0th row (00)
  // r8 is pointing to the 8th row (80)

  // After the two Load8x4() calls:
  // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
  // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
  Load8x4(r0, stride, p1, q0);
  Load8x4(r8, stride, p0, q1);

  t1 = *p1;
  t2 = *q0;
  // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
  // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
  // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
  // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
  *p1 = _mm_unpacklo_epi64(t1, *p0);
  *p0 = _mm_unpackhi_epi64(t1, *p0);
  *q0 = _mm_unpacklo_epi64(t2, *q1);
  *q1 = _mm_unpackhi_epi64(t2, *q1);
}

static WEBP_INLINE void Store4x4(__m128i* x, uint8_t* dst, int stride) {
  int i;
  for (i = 0; i < 4; ++i, dst += stride) {
    *((int32_t*)dst) = _mm_cvtsi128_si32(*x);
    *x = _mm_srli_si128(*x, 4);
  }
}

// Transpose back and store
static WEBP_INLINE void Store16x4(uint8_t* r0, uint8_t* r8, int stride,
                                  __m128i* p1, __m128i* p0,
                                  __m128i* q0, __m128i* q1) {
  __m128i t1;

  // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
  // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
  t1 = *p0;
  *p0 = _mm_unpacklo_epi8(*p1, t1);
  *p1 = _mm_unpackhi_epi8(*p1, t1);

  // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
  // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
  t1 = *q0;
  *q0 = _mm_unpacklo_epi8(t1, *q1);
  *q1 = _mm_unpackhi_epi8(t1, *q1);

  // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
  // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
  t1 = *p0;
  *p0 = _mm_unpacklo_epi16(t1, *q0);
  *q0 = _mm_unpackhi_epi16(t1, *q0);

  // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
  // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
  t1 = *p1;
  *p1 = _mm_unpacklo_epi16(t1, *q1);
  *q1 = _mm_unpackhi_epi16(t1, *q1);

  Store4x4(p0, r0, stride);
  r0 += 4 * stride;
  Store4x4(q0, r0, stride);

  Store4x4(p1, r8, stride);
  r8 += 4 * stride;
  Store4x4(q1, r8, stride);
}

//------------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)

static void SimpleVFilter16SSE2(uint8_t* p, int stride, int thresh) {
  // Load
  __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]);
  __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]);
  __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
  __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]);

  DoFilter2(&p1, &p0, &q0, &q1, thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-stride], p0);
  _mm_storeu_si128((__m128i*)p, q0);
}

static void SimpleHFilter16SSE2(uint8_t* p, int stride, int thresh) {
  __m128i p1, p0, q0, q1;

  p -= 2;  // beginning of p1

  Load16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
  DoFilter2(&p1, &p0, &q0, &q1, thresh);
  Store16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
}

static void SimpleVFilter16iSSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4 * stride;
    SimpleVFilter16SSE2(p, stride, thresh);
  }
}

static void SimpleHFilter16iSSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4;
    SimpleHFilter16SSE2(p, stride, thresh);
  }
}

//------------------------------------------------------------------------------
// Complex In-loop filtering (Paragraph 15.3)

#define MAX_DIFF1(p3, p2, p1, p0, m) {       \
  m = MM_ABS(p3, p2);                        \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));       \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));       \
}

#define MAX_DIFF2(p3, p2, p1, p0, m) {       \
  m = _mm_max_epu8(m, MM_ABS(p3, p2));       \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));       \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));       \
}

#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) {    \
  e1 = _mm_loadu_si128((__m128i*)&(p)[0 * stride]);   \
  e2 = _mm_loadu_si128((__m128i*)&(p)[1 * stride]);   \
  e3 = _mm_loadu_si128((__m128i*)&(p)[2 * stride]);   \
  e4 = _mm_loadu_si128((__m128i*)&(p)[3 * stride]);   \
}

#define LOADUV_H_EDGE(p, u, v, stride) {                                 \
  p = _mm_loadl_epi64((__m128i*)&(u)[(stride)]);                         \
  p = _mm_unpacklo_epi64(p, _mm_loadl_epi64((__m128i*)&(v)[(stride)]));  \
}

#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) {  \
  LOADUV_H_EDGE(e1, u, v, 0 * stride);       \
  LOADUV_H_EDGE(e2, u, v, 1 * stride);       \
  LOADUV_H_EDGE(e3, u, v, 2 * stride);       \
  LOADUV_H_EDGE(e4, u, v, 3 * stride);       \
}

#define STOREUV(p, u, v, stride) {                 \
  _mm_storel_epi64((__m128i*)&u[(stride)], p);     \
  p = _mm_srli_si128(p, 8);                        \
  _mm_storel_epi64((__m128i*)&v[(stride)], p);     \
}

#define COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask) {  \
  __m128i fl_yes;                                    \
  const __m128i it = _mm_set1_epi8(ithresh);         \
  mask = _mm_subs_epu8(mask, it);                    \
  mask = _mm_cmpeq_epi8(mask, _mm_setzero_si128());  \
  NeedsFilter(&p1, &p0, &q0, &q1, thresh, &fl_yes);  \
  mask = _mm_and_si128(mask, fl_yes);                \
}
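// The combined mask requires both that every interior difference gathered by
// MAX_DIFF1/MAX_DIFF2 is <= ithresh and that the NeedsFilter() edge test
// against 'thresh' passes.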

// on macroblock edges
static void VFilter16SSE2(uint8_t* p, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i t1;
  __m128i mask;
  __m128i p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOAD_H_EDGES4(p, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-3 * stride], p2);
  _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
  _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
  _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
  _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
  _mm_storeu_si128((__m128i*)&p[2 * stride], q2);
}

static void HFilter16SSE2(uint8_t* p, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const b = p - 4;
  Load16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);  // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);
  Store16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);
}

// on three inner edges
static void VFilter16iSSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  for (k = 3; k > 0; --k) {
    // Load p3, p2, p1, p0
    LOAD_H_EDGES4(p, stride, t2, t1, p1, p0);
    MAX_DIFF1(t2, t1, p1, p0, mask);

    p += 4 * stride;

    // Load q0, q1, q2, q3
    LOAD_H_EDGES4(p, stride, q0, q1, t1, t2);
    MAX_DIFF2(t2, t1, q1, q0, mask);

    COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
    DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

    // Store
    _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
    _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
    _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
    _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
  }
}

static void HFilter16iSSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  int k;
  uint8_t* b;
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  for (k = 3; k > 0; --k) {
    b = p;
    Load16x4(b, b + 8 * stride, stride, &t2, &t1, &p1, &p0);  // p3, p2, p1, p0
    MAX_DIFF1(t2, t1, p1, p0, mask);

    b += 4;  // beginning of q0
    Load16x4(b, b + 8 * stride, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
    MAX_DIFF2(t2, t1, q1, q0, mask);

    COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
    DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

    b -= 2;  // beginning of p1
    Store16x4(b, b + 8 * stride, stride, &p1, &p0, &q0, &q1);

    p += 4;
  }
}

// 8-pixels wide variant, for chroma filtering
static void VFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
                         int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  STOREUV(p2, u, v, -3 * stride);
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
  STOREUV(q2, u, v, 2 * stride);
}

static void HFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
                         int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const tu = u - 4;
  uint8_t* const tv = v - 4;
  Load16x4(tu, tv, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(u, v, stride, &q0, &q1, &q2, &q3);  // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(tu, tv, stride, &p3, &p2, &p1, &p0);
  Store16x4(u, v, stride, &q0, &q1, &q2, &q3);
}

static void VFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0);
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4 * stride;
  v += 4 * stride;

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2);
  MAX_DIFF2(t2, t1, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  // Store
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
}

static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;
  Load16x4(u, v, stride, &t2, &t1, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4;  // beginning of q0
  v += 4;
  Load16x4(u, v, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
  MAX_DIFF2(t2, t1, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  u -= 2;  // beginning of p1
  v -= 2;
  Store16x4(u, v, stride, &p1, &p0, &q0, &q1);
}

#endif  // WEBP_USE_SSE2

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitSSE2(void);

void VP8DspInitSSE2(void) {
#if defined(WEBP_USE_SSE2)
  VP8Transform = TransformSSE2;
#if defined(USE_TRANSFORM_AC3)
  VP8TransformAC3 = TransformAC3SSE2;
#endif

  VP8VFilter16 = VFilter16SSE2;
  VP8HFilter16 = HFilter16SSE2;
  VP8VFilter8 = VFilter8SSE2;
  VP8HFilter8 = HFilter8SSE2;
  VP8VFilter16i = VFilter16iSSE2;
  VP8HFilter16i = HFilter16iSSE2;
  VP8VFilter8i = VFilter8iSSE2;
  VP8HFilter8i = HFilter8iSSE2;

  VP8SimpleVFilter16 = SimpleVFilter16SSE2;
  VP8SimpleHFilter16 = SimpleHFilter16SSE2;
  VP8SimpleVFilter16i = SimpleVFilter16iSSE2;
  VP8SimpleHFilter16i = SimpleHFilter16iSSE2;
#endif  // WEBP_USE_SSE2
}
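
// Note: VP8DspInitSSE2() is expected to be invoked from the generic dsp setup
// (VP8DspInit()) once runtime CPU detection has confirmed SSE2 support; it
// only rebinds the VP8* function pointers above to the SSE2 variants.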