/*
 * DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stddef.h>

#include "libavutil/attributes.h"
#include "libavutil/internal.h"
#include "libavutil/mem_internal.h"
#include "avcodec.h"
#include "copy_block.h"
#include "simple_idct.h"
#include "me_cmp.h"
#include "mpegvideoenc.h"
#include "config.h"
#include "config_components.h"
/* ff_square_tab[i] = (i - 256) * (i - 256); indexed with a +256 bias so that
 * a signed pixel difference in [-255, 255] maps directly to its square. */
const uint32_t ff_square_tab[512] = {
    65536, 65025, 64516, 64009, 63504, 63001, 62500, 62001, 61504, 61009, 60516, 60025, 59536, 59049, 58564, 58081,
    57600, 57121, 56644, 56169, 55696, 55225, 54756, 54289, 53824, 53361, 52900, 52441, 51984, 51529, 51076, 50625,
    50176, 49729, 49284, 48841, 48400, 47961, 47524, 47089, 46656, 46225, 45796, 45369, 44944, 44521, 44100, 43681,
    43264, 42849, 42436, 42025, 41616, 41209, 40804, 40401, 40000, 39601, 39204, 38809, 38416, 38025, 37636, 37249,
    36864, 36481, 36100, 35721, 35344, 34969, 34596, 34225, 33856, 33489, 33124, 32761, 32400, 32041, 31684, 31329,
    30976, 30625, 30276, 29929, 29584, 29241, 28900, 28561, 28224, 27889, 27556, 27225, 26896, 26569, 26244, 25921,
    25600, 25281, 24964, 24649, 24336, 24025, 23716, 23409, 23104, 22801, 22500, 22201, 21904, 21609, 21316, 21025,
    20736, 20449, 20164, 19881, 19600, 19321, 19044, 18769, 18496, 18225, 17956, 17689, 17424, 17161, 16900, 16641,
    16384, 16129, 15876, 15625, 15376, 15129, 14884, 14641, 14400, 14161, 13924, 13689, 13456, 13225, 12996, 12769,
    12544, 12321, 12100, 11881, 11664, 11449, 11236, 11025, 10816, 10609, 10404, 10201, 10000, 9801, 9604, 9409,
    9216, 9025, 8836, 8649, 8464, 8281, 8100, 7921, 7744, 7569, 7396, 7225, 7056, 6889, 6724, 6561,
    6400, 6241, 6084, 5929, 5776, 5625, 5476, 5329, 5184, 5041, 4900, 4761, 4624, 4489, 4356, 4225,
    4096, 3969, 3844, 3721, 3600, 3481, 3364, 3249, 3136, 3025, 2916, 2809, 2704, 2601, 2500, 2401,
    2304, 2209, 2116, 2025, 1936, 1849, 1764, 1681, 1600, 1521, 1444, 1369, 1296, 1225, 1156, 1089,
    1024, 961, 900, 841, 784, 729, 676, 625, 576, 529, 484, 441, 400, 361, 324, 289,
    256, 225, 196, 169, 144, 121, 100, 81, 64, 49, 36, 25, 16, 9, 4, 1,
    0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225,
    256, 289, 324, 361, 400, 441, 484, 529, 576, 625, 676, 729, 784, 841, 900, 961,
    1024, 1089, 1156, 1225, 1296, 1369, 1444, 1521, 1600, 1681, 1764, 1849, 1936, 2025, 2116, 2209,
    2304, 2401, 2500, 2601, 2704, 2809, 2916, 3025, 3136, 3249, 3364, 3481, 3600, 3721, 3844, 3969,
    4096, 4225, 4356, 4489, 4624, 4761, 4900, 5041, 5184, 5329, 5476, 5625, 5776, 5929, 6084, 6241,
    6400, 6561, 6724, 6889, 7056, 7225, 7396, 7569, 7744, 7921, 8100, 8281, 8464, 8649, 8836, 9025,
    9216, 9409, 9604, 9801, 10000, 10201, 10404, 10609, 10816, 11025, 11236, 11449, 11664, 11881, 12100, 12321,
    12544, 12769, 12996, 13225, 13456, 13689, 13924, 14161, 14400, 14641, 14884, 15129, 15376, 15625, 15876, 16129,
    16384, 16641, 16900, 17161, 17424, 17689, 17956, 18225, 18496, 18769, 19044, 19321, 19600, 19881, 20164, 20449,
    20736, 21025, 21316, 21609, 21904, 22201, 22500, 22801, 23104, 23409, 23716, 24025, 24336, 24649, 24964, 25281,
    25600, 25921, 26244, 26569, 26896, 27225, 27556, 27889, 28224, 28561, 28900, 29241, 29584, 29929, 30276, 30625,
    30976, 31329, 31684, 32041, 32400, 32761, 33124, 33489, 33856, 34225, 34596, 34969, 35344, 35721, 36100, 36481,
    36864, 37249, 37636, 38025, 38416, 38809, 39204, 39601, 40000, 40401, 40804, 41209, 41616, 42025, 42436, 42849,
    43264, 43681, 44100, 44521, 44944, 45369, 45796, 46225, 46656, 47089, 47524, 47961, 48400, 48841, 49284, 49729,
    50176, 50625, 51076, 51529, 51984, 52441, 52900, 53361, 53824, 54289, 54756, 55225, 55696, 56169, 56644, 57121,
    57600, 58081, 58564, 59049, 59536, 60025, 60516, 61009, 61504, 62001, 62500, 63001, 63504, 64009, 64516, 65025,
};

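/*
 * Sum-of-squared-errors (SSE) comparators for block widths 4, 8 and 16.
 * Biasing the table pointer (sq = ff_square_tab + 256) lets the signed
 * difference pix1[i] - pix2[i], which lies in [-255, 255], be used directly
 * as an index: sq[d] == d * d.
 */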
static int sse4_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int sse8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int sse16_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                   ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0]  - pix2[0]];
        s += sq[pix1[1]  - pix2[1]];
        s += sq[pix1[2]  - pix2[2]];
        s += sq[pix1[3]  - pix2[3]];
        s += sq[pix1[4]  - pix2[4]];
        s += sq[pix1[5]  - pix2[5]];
        s += sq[pix1[6]  - pix2[6]];
        s += sq[pix1[7]  - pix2[7]];
        s += sq[pix1[8]  - pix2[8]];
        s += sq[pix1[9]  - pix2[9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];

        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int sum_abs_dctelem_c(const int16_t *block)
{
    int sum = 0, i;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);
    return sum;
}

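/*
 * Rounding averages used to build half-pel interpolated references below:
 * avg2() is the two-tap average with round-to-nearest, avg4() the four-tap
 * (bilinear) equivalent, matching MPEG-style half-pel interpolation.
 */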
#define avg2(a, b)       (((a) + (b) + 1) >> 1)
#define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)

static inline int pix_abs16_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                              ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0]  - pix2[0]);
        s += abs(pix1[1]  - pix2[1]);
        s += abs(pix1[2]  - pix2[2]);
        s += abs(pix1[3]  - pix2[3]);
        s += abs(pix1[4]  - pix2[4]);
        s += abs(pix1[5]  - pix2[5]);
        s += abs(pix1[6]  - pix2[6]);
        s += abs(pix1[7]  - pix2[7]);
        s += abs(pix1[8]  - pix2[8]);
        s += abs(pix1[9]  - pix2[9]);
        s += abs(pix1[10] - pix2[10]);
        s += abs(pix1[11] - pix2[11]);
        s += abs(pix1[12] - pix2[12]);
        s += abs(pix1[13] - pix2[13]);
        s += abs(pix1[14] - pix2[14]);
        s += abs(pix1[15] - pix2[15]);
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

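/*
 * Median-predicted SAD: each difference V(x) = pix1[x] - pix2[x] is
 * predicted from its left, top and top-left neighbours via mid_pred()
 * (a MED/LOCO-I-style median predictor), and the absolute prediction
 * residuals are summed, so smoothly varying error surfaces score low.
 */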
static inline int pix_median_abs16_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                                     ptrdiff_t stride, int h)
{
    int s = 0, i, j;

#define V(x) (pix1[x] - pix2[x])

    s += abs(V(0));
    s += abs(V(1) - V(0));
    s += abs(V(2) - V(1));
    s += abs(V(3) - V(2));
    s += abs(V(4) - V(3));
    s += abs(V(5) - V(4));
    s += abs(V(6) - V(5));
    s += abs(V(7) - V(6));
    s += abs(V(8) - V(7));
    s += abs(V(9) - V(8));
    s += abs(V(10) - V(9));
    s += abs(V(11) - V(10));
    s += abs(V(12) - V(11));
    s += abs(V(13) - V(12));
    s += abs(V(14) - V(13));
    s += abs(V(15) - V(14));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 16; j++)
            s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
        pix1 += stride;
        pix2 += stride;
    }
#undef V
    return s;
}

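/*
 * SAD against half-pel shifted references: _x2 averages horizontal
 * neighbours (half-pel in x), _y2 vertical neighbours (half-pel in y),
 * and _xy2 all four surrounding pixels (half-pel in both directions).
 */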
static int pix_abs16_x2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0]  - avg2(pix2[0],  pix2[1]));
        s += abs(pix1[1]  - avg2(pix2[1],  pix2[2]));
        s += abs(pix1[2]  - avg2(pix2[2],  pix2[3]));
        s += abs(pix1[3]  - avg2(pix2[3],  pix2[4]));
        s += abs(pix1[4]  - avg2(pix2[4],  pix2[5]));
        s += abs(pix1[5]  - avg2(pix2[5],  pix2[6]));
        s += abs(pix1[6]  - avg2(pix2[6],  pix2[7]));
        s += abs(pix1[7]  - avg2(pix2[7],  pix2[8]));
        s += abs(pix1[8]  - avg2(pix2[8],  pix2[9]));
        s += abs(pix1[9]  - avg2(pix2[9],  pix2[10]));
        s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
        s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
        s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
        s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
        s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
        s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int pix_abs16_y2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0]  - avg2(pix2[0],  pix3[0]));
        s += abs(pix1[1]  - avg2(pix2[1],  pix3[1]));
        s += abs(pix1[2]  - avg2(pix2[2],  pix3[2]));
        s += abs(pix1[3]  - avg2(pix2[3],  pix3[3]));
        s += abs(pix1[4]  - avg2(pix2[4],  pix3[4]));
        s += abs(pix1[5]  - avg2(pix2[5],  pix3[5]));
        s += abs(pix1[6]  - avg2(pix2[6],  pix3[6]));
        s += abs(pix1[7]  - avg2(pix2[7],  pix3[7]));
        s += abs(pix1[8]  - avg2(pix2[8],  pix3[8]));
        s += abs(pix1[9]  - avg2(pix2[9],  pix3[9]));
        s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
        s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
        s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
        s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
        s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
        s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static int pix_abs16_xy2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                           ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0]  - avg4(pix2[0],  pix2[1],  pix3[0],  pix3[1]));
        s += abs(pix1[1]  - avg4(pix2[1],  pix2[2],  pix3[1],  pix3[2]));
        s += abs(pix1[2]  - avg4(pix2[2],  pix2[3],  pix3[2],  pix3[3]));
        s += abs(pix1[3]  - avg4(pix2[3],  pix2[4],  pix3[3],  pix3[4]));
        s += abs(pix1[4]  - avg4(pix2[4],  pix2[5],  pix3[4],  pix3[5]));
        s += abs(pix1[5]  - avg4(pix2[5],  pix2[6],  pix3[5],  pix3[6]));
        s += abs(pix1[6]  - avg4(pix2[6],  pix2[7],  pix3[6],  pix3[7]));
        s += abs(pix1[7]  - avg4(pix2[7],  pix2[8],  pix3[7],  pix3[8]));
        s += abs(pix1[8]  - avg4(pix2[8],  pix2[9],  pix3[8],  pix3[9]));
        s += abs(pix1[9]  - avg4(pix2[9],  pix2[10], pix3[9],  pix3[10]));
        s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
        s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
        s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
        s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
        s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
        s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static inline int pix_abs8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                             ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static inline int pix_median_abs8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                                    ptrdiff_t stride, int h)
{
    int s = 0, i, j;

#define V(x) (pix1[x] - pix2[x])

    s += abs(V(0));
    s += abs(V(1) - V(0));
    s += abs(V(2) - V(1));
    s += abs(V(3) - V(2));
    s += abs(V(4) - V(3));
    s += abs(V(5) - V(4));
    s += abs(V(6) - V(5));
    s += abs(V(7) - V(6));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 8; j++)
            s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
        pix1 += stride;
        pix2 += stride;
    }
#undef V
    return s;
}

static int pix_abs8_x2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                         ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int pix_abs8_y2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                         ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static int pix_abs8_xy2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

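/*
 * Noise-preserving SSE: score1 is the plain SSE, while score2 compares the
 * local 2x2 gradients of the two blocks, penalizing differences that merely
 * flatten texture or noise. The gradient term is scaled by nsse_weight
 * (8 when no context is available).
 */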
static int nsse16_c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
                    ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 15; x++)
                score2 += FFABS(s1[x]     - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x]     - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
                   ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 7; x++)
                score2 += FFABS(s1[x]     - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x]     - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int zero_cmp(MpegEncContext *s, const uint8_t *a, const uint8_t *b,
                    ptrdiff_t stride, int h)
{
    return 0;
}

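/*
 * Resolves an FF_CMP_* selection to the matching comparator array inside
 * MECmpContext via the offsetof() lookup table below, and copies its six
 * entries into cmp[]. Metrics marked mpv_only (DCT, PSNR, BIT, RD, DCTMAX,
 * DCT264) need mpegvideo encoder state and are rejected unless mpvenc is
 * set. A rough usage sketch (caller-side names are illustrative only):
 *
 *     me_cmp_func sad[6];
 *     if (ff_set_cmp(&mecc, sad, FF_CMP_SAD, 0) >= 0)
 *         score = sad[0](s, cur, ref, stride, 16); // 16x16 SAD
 */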
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
{
#define ENTRY(CMP_FLAG, ARRAY, MPVENC_ONLY)         \
    [FF_CMP_ ## CMP_FLAG] = {                       \
        .offset    = offsetof(MECmpContext, ARRAY), \
        .mpv_only  = MPVENC_ONLY,                   \
        .available = 1,                             \
    }
    static const struct {
        char available;
        char mpv_only;
        uint16_t offset;
    } cmp_func_list[] = {
        ENTRY(SAD,        sad,            0),
        ENTRY(SSE,        sse,            0),
        ENTRY(SATD,       hadamard8_diff, 0),
        ENTRY(DCT,        dct_sad,        1),
        ENTRY(PSNR,       quant_psnr,     1),
        ENTRY(BIT,        bit,            1),
        ENTRY(RD,         rd,             1),
        ENTRY(VSAD,       vsad,           0),
        ENTRY(VSSE,       vsse,           0),
        ENTRY(NSSE,       nsse,           0),
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
        ENTRY(W53,        w53,            0),
        ENTRY(W97,        w97,            0),
#endif
        ENTRY(DCTMAX,     dct_max,        1),
#if CONFIG_GPL
        ENTRY(DCT264,     dct264_sad,     1),
#endif
        ENTRY(MEDIAN_SAD, median_sad,     0),
    };
    const me_cmp_func *me_cmp_func_array;

    type &= 0xFF;

    if (type == FF_CMP_ZERO) {
        for (int i = 0; i < 6; i++)
            cmp[i] = zero_cmp;
        return 0;
    }
    if (type >= FF_ARRAY_ELEMS(cmp_func_list) ||
        !cmp_func_list[type].available        ||
        (!mpvenc && cmp_func_list[type].mpv_only)) {
        av_log(NULL, AV_LOG_ERROR,
               "invalid cmp function selection\n");
        return AVERROR(EINVAL);
    }
    me_cmp_func_array = (const me_cmp_func*)(((const char*)c) + cmp_func_list[type].offset);
    for (int i = 0; i < 6; i++)
        cmp[i] = me_cmp_func_array[i];

    return 0;
}

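/*
 * Helpers for the 8x8 Hadamard transform used by the SATD comparators
 * below: BUTTERFLY2/BUTTERFLY1 compute sum/difference pairs in place,
 * and BUTTERFLYA folds the final butterfly stage directly into the sum
 * of absolute values.
 */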
#define BUTTERFLY2(o1, o2, i1, i2) \
    o1 = (i1) + (i2);              \
    o2 = (i1) - (i2);

#define BUTTERFLY1(x, y) \
    {                    \
        int a, b;        \
        a = x;           \
        b = y;           \
        x = a + b;       \
        y = a - b;       \
    }

#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))

static int hadamard8_diff8x8_c(MpegEncContext *s, const uint8_t *dst,
                               const uint8_t *src, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0] - dst[stride * i + 0],
                   src[stride * i + 1] - dst[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2] - dst[stride * i + 2],
                   src[stride * i + 3] - dst[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4] - dst[stride * i + 4],
                   src[stride * i + 5] - dst[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6] - dst[stride * i + 6],
                   src[stride * i + 7] - dst[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
               BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
               BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
               BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }
    return sum;
}

static int hadamard8_intra8x8_c(MpegEncContext *s, const uint8_t *src,
                                const uint8_t *dummy, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0], src[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2], src[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4], src[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6], src[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum +=
            BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
            + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
            + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
            + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }

    sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // -mean

    return sum;
}

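/*
 * DCT-based metric: forward-transform the pixel difference with the
 * encoder's fdct and sum the absolute coefficients, roughly modelling
 * the cost of coding the residual with the codec's own transform.
 */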
static int dct_sad8x8_c(MpegEncContext *s, const uint8_t *src1,
                        const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64]);

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
    s->fdsp.fdct(temp);
    return s->sum_abs_dctelem(temp);
}

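/*
 * DCT8_1D below matches the H.264 high-profile 8x8 integer transform;
 * the CONFIG_GPL guard suggests GPL-licensed origins (likely x264).
 * It is applied to rows first, then to columns, with the second pass
 * accumulating |coefficient| directly via the DST() macro.
 */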
#if CONFIG_GPL
#define DCT8_1D                                         \
    {                                                   \
        const int s07 = SRC(0) + SRC(7);                \
        const int s16 = SRC(1) + SRC(6);                \
        const int s25 = SRC(2) + SRC(5);                \
        const int s34 = SRC(3) + SRC(4);                \
        const int a0  = s07 + s34;                      \
        const int a1  = s16 + s25;                      \
        const int a2  = s07 - s34;                      \
        const int a3  = s16 - s25;                      \
        const int d07 = SRC(0) - SRC(7);                \
        const int d16 = SRC(1) - SRC(6);                \
        const int d25 = SRC(2) - SRC(5);                \
        const int d34 = SRC(3) - SRC(4);                \
        const int a4  = d16 + d25 + (d07 + (d07 >> 1)); \
        const int a5  = d07 - d34 - (d25 + (d25 >> 1)); \
        const int a6  = d07 + d34 - (d16 + (d16 >> 1)); \
        const int a7  = d16 - d25 + (d34 + (d34 >> 1)); \
        DST(0, a0 + a1);                                \
        DST(1, a4 + (a7 >> 2));                         \
        DST(2, a2 + (a3 >> 1));                         \
        DST(3, a5 + (a6 >> 2));                         \
        DST(4, a0 - a1);                                \
        DST(5, a6 - (a5 >> 2));                         \
        DST(6, (a2 >> 1) - a3);                         \
        DST(7, (a4 >> 2) - a7);                         \
    }

static int dct264_sad8x8_c(MpegEncContext *s, const uint8_t *src1,
                           const uint8_t *src2, ptrdiff_t stride, int h)
{
    int16_t dct[8][8];
    int i, sum = 0;

    s->pdsp.diff_pixels_unaligned(dct[0], src1, src2, stride);

#define SRC(x) dct[i][x]
#define DST(x, v) dct[i][x] = v
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST

#define SRC(x) dct[x][i]
#define DST(x, v) sum += FFABS(v)
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST
    return sum;
}
#endif

static int dct_max8x8_c(MpegEncContext *s, const uint8_t *src1,
                        const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int sum = 0, i;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
    s->fdsp.fdct(temp);

    for (i = 0; i < 64; i++)
        sum = FFMAX(sum, FFABS(temp[i]));

    return sum;
}

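/*
 * Quantization-noise metric: run the difference block through the
 * encoder's fdct -> quantize -> dequantize -> idct round trip (dct_quantize
 * performs the forward DCT internally) and measure the squared error
 * against the untouched copy in bak.
 */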
static int quant_psnr8x8_c(MpegEncContext *s, const uint8_t *src1,
                           const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
    int16_t *const bak = temp + 64;
    int sum = 0, i;

    s->mb_intra = 0;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    memcpy(bak, temp, 64 * sizeof(int16_t));

    s->block_last_index[0 /* FIXME */] =
        s->dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
    s->dct_unquantize_inter(s, temp, 0, s->qscale);
    ff_simple_idct_int16_8bit(temp); // FIXME

    for (i = 0; i < 64; i++)
        sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);

    return sum;
}

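/*
 * Rate-distortion metric: quantize the difference block, estimate the VLC
 * bit cost of the resulting run/level pairs, reconstruct via dequantize +
 * IDCT, then combine SSE distortion with the bit estimate weighted by
 * qscale^2 (the 109/128 factor is an empirical lambda weighting).
 */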
static int rd8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2,
                   ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
    int i, last, run, bits, level, distortion, start_i;
    const int esc_length = s->ac_esc_length;
    uint8_t *length, *last_length;

    copy_block8(lsrc1, src1, 8, stride, 8);
    copy_block8(lsrc2, src2, 8, stride, 8);

    s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8);

    s->block_last_index[0 /* FIXME */] =
    last                               =
        s->dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);

    bits = 0;

    if (s->mb_intra) {
        start_i     = 1;
        length      = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits       += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i     = 0;
        length      = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0) {
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        } else
            bits += esc_length;
    }

    if (last >= 0) {
        if (s->mb_intra)
            s->dct_unquantize_intra(s, temp, 0, s->qscale);
        else
            s->dct_unquantize_inter(s, temp, 0, s->qscale);
    }

    s->idsp.idct_add(lsrc2, 8, temp);

    distortion = s->sse_cmp[1](NULL, lsrc2, lsrc1, 8, 8);

    return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
}

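/*
 * Bit-count metric: same quantize + run/level walk as rd8x8_c, but
 * returns only the estimated VLC bit cost, with no distortion term.
 */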
static int bit8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2,
                    ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int i, last, run, bits, level, start_i;
    const int esc_length = s->ac_esc_length;
    uint8_t *length, *last_length;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    s->block_last_index[0 /* FIXME */] =
    last                               =
        s->dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);

    bits = 0;

    if (s->mb_intra) {
        start_i     = 1;
        length      = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits       += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i     = 0;
        length      = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0)
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        else
            bits += esc_length;
    }

    return bits;
}

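/*
 * Vertical-gradient metrics: the vsad/vsse families score row-to-row
 * activity. The intra variants measure |s[x] - s[x + stride]| within a
 * single block (a cheap activity/interlace estimate); the inter variants
 * measure how much the error s1 - s2 changes from one row to the next.
 */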
#define VSAD_INTRA(size)                                                    \
static int vsad_intra ## size ## _c(MpegEncContext *c,                      \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h)                \
{                                                                           \
    int score = 0, x, y;                                                    \
                                                                            \
    for (y = 1; y < h; y++) {                                               \
        for (x = 0; x < size; x += 4) {                                     \
            score += FFABS(s[x]     - s[x + stride])     +                  \
                     FFABS(s[x + 1] - s[x + stride + 1]) +                  \
                     FFABS(s[x + 2] - s[x + 2 + stride]) +                  \
                     FFABS(s[x + 3] - s[x + 3 + stride]);                   \
        }                                                                   \
        s += stride;                                                        \
    }                                                                       \
                                                                            \
    return score;                                                           \
}
VSAD_INTRA(8)
VSAD_INTRA(16)

#define VSAD(size)                                                            \
static int vsad ## size ## _c(MpegEncContext *c,                              \
                              const uint8_t *s1, const uint8_t *s2,           \
                              ptrdiff_t stride, int h)                        \
{                                                                             \
    int score = 0, x, y;                                                      \
                                                                              \
    for (y = 1; y < h; y++) {                                                 \
        for (x = 0; x < size; x++)                                            \
            score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);  \
        s1 += stride;                                                         \
        s2 += stride;                                                         \
    }                                                                         \
                                                                              \
    return score;                                                             \
}
VSAD(8)
VSAD(16)

#define SQ(a) ((a) * (a))
#define VSSE_INTRA(size)                                                    \
static int vsse_intra ## size ## _c(MpegEncContext *c,                      \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h)                \
{                                                                           \
    int score = 0, x, y;                                                    \
                                                                            \
    for (y = 1; y < h; y++) {                                               \
        for (x = 0; x < size; x += 4) {                                     \
            score += SQ(s[x]     - s[x + stride])     +                     \
                     SQ(s[x + 1] - s[x + stride + 1]) +                     \
                     SQ(s[x + 2] - s[x + stride + 2]) +                     \
                     SQ(s[x + 3] - s[x + stride + 3]);                      \
        }                                                                   \
        s += stride;                                                        \
    }                                                                       \
                                                                            \
    return score;                                                           \
}
VSSE_INTRA(8)
VSSE_INTRA(16)

#define VSSE(size)                                                           \
static int vsse ## size ## _c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, \
                              ptrdiff_t stride, int h)                       \
{                                                                            \
    int score = 0, x, y;                                                     \
                                                                             \
    for (y = 1; y < h; y++) {                                                \
        for (x = 0; x < size; x++)                                           \
            score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);    \
        s1 += stride;                                                        \
        s2 += stride;                                                        \
    }                                                                        \
                                                                             \
    return score;                                                            \
}
VSSE(8)
VSSE(16)

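/*
 * Builds a 16-wide comparator out of an 8x8 one by scoring the left and
 * right 8x8 halves independently, and the bottom two quadrants as well
 * when h == 16.
 */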
#define WRAPPER8_16_SQ(name8, name16)                                         \
static int name16(MpegEncContext *s, const uint8_t *dst, const uint8_t *src, \
                  ptrdiff_t stride, int h)                                    \
{                                                                             \
    int score = 0;                                                            \
                                                                              \
    score += name8(s, dst, src, stride, 8);                                   \
    score += name8(s, dst + 8, src + 8, stride, 8);                           \
    if (h == 16) {                                                            \
        dst   += 8 * stride;                                                  \
        src   += 8 * stride;                                                  \
        score += name8(s, dst, src, stride, 8);                               \
        score += name8(s, dst + 8, src + 8, stride, 8);                       \
    }                                                                         \
    return score;                                                             \
}

WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
#if CONFIG_GPL
WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
#endif
WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
WRAPPER8_16_SQ(rd8x8_c, rd16_c)
WRAPPER8_16_SQ(bit8x8_c, bit16_c)

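/*
 * Installs the C reference implementations above, then lets the
 * per-architecture init functions override individual entries with
 * optimized versions.
 */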
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
{
    memset(c, 0, sizeof(*c));

    c->sum_abs_dctelem = sum_abs_dctelem_c;

    /* TODO [0] 16  [1] 8 */
    c->pix_abs[0][0] = pix_abs16_c;
    c->pix_abs[0][1] = pix_abs16_x2_c;
    c->pix_abs[0][2] = pix_abs16_y2_c;
    c->pix_abs[0][3] = pix_abs16_xy2_c;
    c->pix_abs[1][0] = pix_abs8_c;
    c->pix_abs[1][1] = pix_abs8_x2_c;
    c->pix_abs[1][2] = pix_abs8_y2_c;
    c->pix_abs[1][3] = pix_abs8_xy2_c;

#define SET_CMP_FUNC(name)     \
    c->name[0] = name ## 16_c; \
    c->name[1] = name ## 8x8_c;

    SET_CMP_FUNC(hadamard8_diff)
    c->hadamard8_diff[4] = hadamard8_intra16_c;
    c->hadamard8_diff[5] = hadamard8_intra8x8_c;
    SET_CMP_FUNC(dct_sad)
    SET_CMP_FUNC(dct_max)
#if CONFIG_GPL
    SET_CMP_FUNC(dct264_sad)
#endif
    c->sad[0] = pix_abs16_c;
    c->sad[1] = pix_abs8_c;
    c->sse[0] = sse16_c;
    c->sse[1] = sse8_c;
    c->sse[2] = sse4_c;
    SET_CMP_FUNC(quant_psnr)
    SET_CMP_FUNC(rd)
    SET_CMP_FUNC(bit)
    c->vsad[0] = vsad16_c;
    c->vsad[1] = vsad8_c;
    c->vsad[4] = vsad_intra16_c;
    c->vsad[5] = vsad_intra8_c;
    c->vsse[0] = vsse16_c;
    c->vsse[1] = vsse8_c;
    c->vsse[4] = vsse_intra16_c;
    c->vsse[5] = vsse_intra8_c;
    c->nsse[0] = nsse16_c;
    c->nsse[1] = nsse8_c;
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
    ff_dsputil_init_dwt(c);
#endif

    c->median_sad[0] = pix_median_abs16_c;
    c->median_sad[1] = pix_median_abs8_c;

#if ARCH_AARCH64
    ff_me_cmp_init_aarch64(c, avctx);
#elif ARCH_ARM
    ff_me_cmp_init_arm(c, avctx);
#elif ARCH_PPC
    ff_me_cmp_init_ppc(c, avctx);
#elif ARCH_RISCV
    ff_me_cmp_init_riscv(c, avctx);
#elif ARCH_X86
    ff_me_cmp_init_x86(c, avctx);
#elif ARCH_MIPS
    ff_me_cmp_init_mips(c, avctx);
#endif
}