/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "idct_xvid.h"
int mm_flags; /* multimedia extension flags */
/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_8  ) = 0x0008000800080008ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_t,    ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };
#define JUMPALIGN() asm volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd)  asm volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd "   \n\t"\
    "paddb %%" #regd ", %%" #regd "     \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd)  asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for a shared library it is better to build these constants in registers
// than to load them from memory
#define MOVQ_BONE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd "   \n\t" \
    "psrlw $15, %%" #regd "             \n\t" \
    "packuswb %%" #regd ", %%" #regd "  \n\t" ::)

#define MOVQ_WTWO(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd "   \n\t" \
    "psrlw $15, %%" #regd "             \n\t" \
    "psllw $1, %%" #regd "              \n\t"::)
#endif
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    asm volatile(
            "movq   %3, %%mm0           \n\t"
            "movq   8%3, %%mm1          \n\t"
            "movq   16%3, %%mm2         \n\t"
            "movq   24%3, %%mm3         \n\t"
            "movq   32%3, %%mm4         \n\t"
            "movq   40%3, %%mm5         \n\t"
            "movq   48%3, %%mm6         \n\t"
            "movq   56%3, %%mm7         \n\t"
            "packuswb %%mm1, %%mm0      \n\t"
            "packuswb %%mm3, %%mm2      \n\t"
            "packuswb %%mm5, %%mm4      \n\t"
            "packuswb %%mm7, %%mm6      \n\t"
            "movq   %%mm0, (%0)         \n\t"
            "movq   %%mm2, (%0, %1)     \n\t"
            "movq   %%mm4, (%0, %1, 2)  \n\t"
            "movq   %%mm6, (%0, %2)     \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
            :"memory");
    pix += line_size*4;
    p += 32;

    // if an exact copy of the code above were used here, the compiler
    // would generate some very strange code, so pass the block pointer
    // through an "r" constraint instead
    asm volatile(
            "movq   (%3), %%mm0         \n\t"
            "movq   8(%3), %%mm1        \n\t"
            "movq   16(%3), %%mm2       \n\t"
            "movq   24(%3), %%mm3       \n\t"
            "movq   32(%3), %%mm4       \n\t"
            "movq   40(%3), %%mm5       \n\t"
            "movq   48(%3), %%mm6       \n\t"
            "movq   56(%3), %%mm7       \n\t"
            "packuswb %%mm1, %%mm0      \n\t"
            "packuswb %%mm3, %%mm2      \n\t"
            "packuswb %%mm5, %%mm4      \n\t"
            "packuswb %%mm7, %%mm6      \n\t"
            "movq   %%mm0, (%0)         \n\t"
            "movq   %%mm2, (%0, %1)     \n\t"
            "movq   %%mm4, (%0, %1, 2)  \n\t"
            "movq   %%mm6, (%0, %2)     \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
            :"memory");
}

static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}
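/* packsswb saturates the 16-bit coefficients to the signed range [-128,127];
   the wrapping paddb of vector128 (0x80 in every byte) then maps that range
   onto the unsigned [0,255], i.e. it flips the sign bit of each byte. */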
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        asm volatile(
                "movq   (%2), %%mm0     \n\t"
                "movq   8(%2), %%mm1    \n\t"
                "movq   16(%2), %%mm2   \n\t"
                "movq   24(%2), %%mm3   \n\t"
                "movq   %0, %%mm4       \n\t"
                "movq   %1, %%mm6       \n\t"
                "movq   %%mm4, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm4, %%mm0    \n\t"
                "paddsw %%mm5, %%mm1    \n\t"
                "movq   %%mm6, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm6, %%mm2    \n\t"
                "paddsw %%mm5, %%mm3    \n\t"
                "packuswb %%mm1, %%mm0  \n\t"
                "packuswb %%mm3, %%mm2  \n\t"
                "movq   %%mm0, %0       \n\t"
                "movq   %%mm2, %1       \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}
static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}
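/* Note: the loads use movdqu because the source rows may have any alignment,
   while the stores use movdqa, so the destination block is assumed to be
   16-byte aligned. */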
static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "pavgb (%2), %%xmm0            \n\t"
         "pavgb (%2,%3), %%xmm1         \n\t"
         "pavgb (%2,%3,2), %%xmm2       \n\t"
         "pavgb (%2,%4), %%xmm3         \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}
static void clear_blocks_mmx(DCTELEM *blocks)
{
    asm volatile(
            "pxor %%mm7, %%mm7              \n\t"
            "mov $-128*6, %%"REG_a"         \n\t"
            "1:                             \n\t"
            "movq %%mm7, (%0, %%"REG_a")    \n\t"
            "movq %%mm7, 8(%0, %%"REG_a")   \n\t"
            "movq %%mm7, 16(%0, %%"REG_a")  \n\t"
            "movq %%mm7, 24(%0, %%"REG_a")  \n\t"
            "add $32, %%"REG_a"             \n\t"
            " js 1b                         \n\t"
            : : "r" (((uint8_t *)blocks)+128*6)
            : "%"REG_a
            );
}
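/* The block pointer is biased by +128*6 (six 64-coefficient blocks of 128
   bytes each) and REG_a runs from -128*6 up to 0, so the "add $32" both
   advances the index and sets the sign flag tested by "js 1b", saving an
   explicit comparison. */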
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    asm volatile(
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "cmp %3, %0                     \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}
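/* The asm loop consumes 16 bytes per iteration and stops once i >= w-15,
   so it never overruns the buffer; the scalar loop above it finishes the
   at most 15 remaining bytes. The same scheme is used below. */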
static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    asm volatile(
        "1:                             \n\t"
        "movq   (%2, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb  (%3, %0), %%mm0         \n\t"
        "paddb 8(%3, %0), %%mm1         \n\t"
        "movq %%mm0,  (%1, %0)          \n\t"
        "movq %%mm1, 8(%1, %0)          \n\t"
        "add $16, %0                    \n\t"
        "cmp %4, %0                     \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7              \n\t"\
    "movq  %0, %%mm0                \n\t"\
    "movq  %0, %%mm1                \n\t"\
    "movq  %3, %%mm2                \n\t"\
    "movq  %3, %%mm3                \n\t"\
    "punpcklbw %%mm7, %%mm0         \n\t"\
    "punpckhbw %%mm7, %%mm1         \n\t"\
    "punpcklbw %%mm7, %%mm2         \n\t"\
    "punpckhbw %%mm7, %%mm3         \n\t"\
    "psubw %%mm2, %%mm0             \n\t"\
    "psubw %%mm3, %%mm1             \n\t"\
    "movq  %1, %%mm2                \n\t"\
    "movq  %1, %%mm3                \n\t"\
    "movq  %2, %%mm4                \n\t"\
    "movq  %2, %%mm5                \n\t"\
    "punpcklbw %%mm7, %%mm2         \n\t"\
    "punpckhbw %%mm7, %%mm3         \n\t"\
    "punpcklbw %%mm7, %%mm4         \n\t"\
    "punpckhbw %%mm7, %%mm5         \n\t"\
    "psubw %%mm2, %%mm4             \n\t"\
    "psubw %%mm3, %%mm5             \n\t"\
    "psllw $2, %%mm4                \n\t"\
    "psllw $2, %%mm5                \n\t"\
    "paddw %%mm0, %%mm4             \n\t"\
    "paddw %%mm1, %%mm5             \n\t"\
    "pxor %%mm6, %%mm6              \n\t"\
    "pcmpgtw %%mm4, %%mm6           \n\t"\
    "pcmpgtw %%mm5, %%mm7           \n\t"\
    "pxor %%mm6, %%mm4              \n\t"\
    "pxor %%mm7, %%mm5              \n\t"\
    "psubw %%mm6, %%mm4             \n\t"\
    "psubw %%mm7, %%mm5             \n\t"\
    "psrlw $3, %%mm4                \n\t"\
    "psrlw $3, %%mm5                \n\t"\
    "packuswb %%mm5, %%mm4          \n\t"\
    "packsswb %%mm7, %%mm6          \n\t"\
    "pxor %%mm7, %%mm7              \n\t"\
    "movd %4, %%mm2                 \n\t"\
    "punpcklbw %%mm2, %%mm2         \n\t"\
    "punpcklbw %%mm2, %%mm2         \n\t"\
    "punpcklbw %%mm2, %%mm2         \n\t"\
    "psubusb %%mm4, %%mm2           \n\t"\
    "movq %%mm2, %%mm3              \n\t"\
    "psubusb %%mm4, %%mm3           \n\t"\
    "psubb %%mm3, %%mm2             \n\t"\
    "movq %1, %%mm3                 \n\t"\
    "movq %2, %%mm4                 \n\t"\
    "pxor %%mm6, %%mm3              \n\t"\
    "pxor %%mm6, %%mm4              \n\t"\
    "paddusb %%mm2, %%mm3           \n\t"\
    "psubusb %%mm2, %%mm4           \n\t"\
    "pxor %%mm6, %%mm3              \n\t"\
    "pxor %%mm6, %%mm4              \n\t"\
    "paddusb %%mm2, %%mm2           \n\t"\
    "packsswb %%mm1, %%mm0          \n\t"\
    "pcmpgtb %%mm0, %%mm7           \n\t"\
    "pxor %%mm7, %%mm0              \n\t"\
    "psubb %%mm7, %%mm0             \n\t"\
    "movq %%mm0, %%mm1              \n\t"\
    "psubusb %%mm2, %%mm0           \n\t"\
    "psubb %%mm0, %%mm1             \n\t"\
    "pand %5, %%mm1                 \n\t"\
    "psrlw $2, %%mm1                \n\t"\
    "pxor %%mm7, %%mm1              \n\t"\
    "psubb %%mm7, %%mm1             \n\t"\
    "movq %0, %%mm5                 \n\t"\
    "movq %3, %%mm6                 \n\t"\
    "psubb %%mm1, %%mm5             \n\t"\
    "paddb %%mm1, %%mm6             \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1             \n\t"
        "movq %%mm4, %2             \n\t"
        "movq %%mm5, %0             \n\t"
        "movq %%mm6, %3             \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src,            8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1          \n\t"
        "movq %%mm4, %%mm0          \n\t"
        "punpcklbw %%mm3, %%mm5     \n\t"
        "punpcklbw %%mm6, %%mm4     \n\t"
        "punpckhbw %%mm3, %%mm1     \n\t"
        "punpckhbw %%mm6, %%mm0     \n\t"
        "movq %%mm5, %%mm3          \n\t"
        "movq %%mm1, %%mm6          \n\t"
        "punpcklwd %%mm4, %%mm5     \n\t"
        "punpcklwd %%mm0, %%mm1     \n\t"
        "punpckhwd %%mm4, %%mm3     \n\t"
        "punpckhwd %%mm0, %%mm6     \n\t"
        "movd %%mm5, (%0)           \n\t"
        "punpckhdq %%mm5, %%mm5     \n\t"
        "movd %%mm5, (%0,%2)        \n\t"
        "movd %%mm3, (%0,%2,2)      \n\t"
        "punpckhdq %%mm3, %%mm3     \n\t"
        "movd %%mm3, (%0,%3)        \n\t"
        "movd %%mm1, (%1)           \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd %%mm1, (%1,%2)        \n\t"
        "movd %%mm6, (%1,%2,2)      \n\t"
        "punpckhdq %%mm6, %%mm6     \n\t"
        "movd %%mm6, (%1,%3)        \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride),
           "r" ((x86_reg)(3*stride))
        :"memory"
    );
    }
}
/* draw the edges of width 'w' of an image of size width, height
   this mmx version can only handle w==8 || w==16 */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        asm volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
                );
    }
    else
    {
        asm volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq %%mm0, -16(%0)            \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "movq %%mm1, 8(%0, %2)          \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
                );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        asm volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
                );
        ptr= last_line + (i + 1) * wrap - w;
        asm volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
                );
    }
}
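/* The left/right replication above relies on a broadcast trick: movd +
   punpcklbw + punpcklwd + punpckldq replicates the leftmost pixel into all
   8 bytes of an MMX register, and the punpckhbw/punpckhwd/punpckhdq
   sequence does the same with the rightmost pixel of each line. */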
#define PAETH(cpu, abs3)\
void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    asm volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6,    %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#ifdef HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
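/* Scalar reference for the Paeth predictor above, per the PNG spec: with
   a = left, b = top, c = top-left and p = a + b - c, pick whichever of
   a, b, c is closest to p (ties prefer a, then b); the decoded byte is
   src + predictor, modulo 256. The MMX version evaluates pa = |p-a| = |b-c|,
   pb = |p-b| = |a-c| and pc = |p-c| for four samples at once and selects
   with pcmpgtw/pand/pandn masks instead of branches. */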
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
    "movq "#in7", " #m3 "             \n\t" /* d */\
    "movq "#in0", %%mm5               \n\t" /* D */\
    "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
    "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5               \n\t" /* C */\
    "movq "#in2", %%mm6               \n\t" /* B */\
    "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
    "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4            \n\t" /* x2 */\
    "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5                  \n\t"\
    "packuswb %%mm5, %%mm5            \n\t"\
    OP(%%mm5, out, %%mm7, d)
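/* x1..x4 above are the pairwise sums of symmetric taps, so the result
   (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5 implements the MPEG-4
   half-pel interpolation kernel (-1, 3, -6, 20, 20, -6, 3, -1)/32. */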
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0               \n\t"\
            "movq 8(%0), %%mm1              \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0             \n\t"\
            "movq 24(%0), %%mm1             \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %5, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "movq 8(%0), %%mm2              \n\t"\
        "movq 8(%0), %%mm3              \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 17*8(%1)           \n\t"\
        "movq %%mm2, 2*17*8(%1)         \n\t"\
        "movq %%mm3, 3*17*8(%1)         \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count= 4;\
\
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        "add $136, %0                   \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count= 2;\
\
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"

#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "     \n\t"\
"pavgusb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "        \n\t"

#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "     \n\t"\
"pavgb " #temp ", " #a "            \n\t"\
"mov" #size " " #a ", " #b "        \n\t"
QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
/***********************************/
/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}

#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)
static void just_return() { return; }
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    asm volatile(
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r"(1<<shift)
    );

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            asm volatile(
                "movq %0, %%mm4         \n\t"
                "movq %1, %%mm5         \n\t"
                "paddw %2, %%mm4        \n\t"
                "paddw %3, %%mm5        \n\t"
                "movq %%mm4, %0         \n\t"
                "movq %%mm5, %1         \n\t"
                "psrlw $12, %%mm4       \n\t"
                "psrlw $12, %%mm5       \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            asm volatile(
                "movq %%mm6, %%mm2      \n\t"
                "movq %%mm6, %%mm1      \n\t"
                "psubw %%mm4, %%mm2     \n\t"
                "psubw %%mm5, %%mm1     \n\t"
                "movq %%mm2, %%mm0      \n\t"
                "movq %%mm4, %%mm3      \n\t"
                "pmullw %%mm1, %%mm0    \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3    \n\t" // dx*dy
                "pmullw %%mm5, %%mm2    \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1    \n\t" // dx*(s-dy)

                "movd %4, %%mm5         \n\t"
                "movd %3, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3    \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2    \n\t" // src[0,1] * (s-dx)*dy

                "movd %2, %%mm5         \n\t"
                "movd %1, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1    \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0    \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw %5, %%mm1        \n\t"
                "paddw %%mm3, %%mm2     \n\t"
                "paddw %%mm1, %%mm0     \n\t"
                "paddw %%mm2, %%mm0     \n\t"

                "psrlw %6, %%mm0        \n\t"
                "packuswb %%mm0, %%mm0  \n\t"
                "movd %%mm0, %0         \n\t"

                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}
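/* Per output pixel the loop above evaluates, with s = 1<<shift (broadcast
   into mm6) and the per-pixel subpel offsets dx, dy kept in mm4/mm5:
   dst = ( src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy)
         + src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r ) >> (2*shift),
   i.e. bilinear interpolation with rounding constant r. */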

#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        asm volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH
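
/* Both expansions emit a function that issues one prefetch hint per row of
 * the block: prefetcht0 for the MMX2/SSE form, the 3DNow! prefetch for the
 * other; the init code below only installs whichever form the CPU actually
 * supports. */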

#include "h264dsp_mmx.c"

void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);

void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);

void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}

/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

/* XXX: these wrapper functions should be removed as soon as all IDCTs are
   converted */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct(block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct(block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx(block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2(block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
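
/* All of the wrappers above follow one pattern: run the IDCT in place on
 * the coefficient block, then either store the clamped result into the
 * frame (the _put variants) or add it to the existing pixels with clamping
 * (the _add variants, used when the block is added to a prediction). */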

static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    asm volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        asm volatile(
            "movq       %0, %%mm0 \n\t"
            "movq       %1, %%mm1 \n\t"
            "movq    %%mm0, %%mm2 \n\t"
            "movq    %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld     $31, %%mm2 \n\t" // keep only the sign bit
            "pxor    %%mm2, %%mm1 \n\t"
            "movq    %%mm3, %%mm4 \n\t"
            "pand    %%mm1, %%mm3 \n\t"
            "pandn   %%mm1, %%mm4 \n\t"
            "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq    %%mm3, %1    \n\t"
            "movq    %%mm0, %0    \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    asm volatile("femms");
}
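
/* A scalar sketch of the branchless coupling above, mirroring the inline
 * comments: a ^ sign(m) flips the sign of ang[i] when mag[i] is negative,
 * and the compare-generated all-ones/all-zeros masks select, per element,
 * whether that value is added to produce the new ang or subtracted to
 * produce the new mag, so the whole transform runs without branches. */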

static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    asm volatile(
        "movaps %0, %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        asm volatile(
            "movaps      %0, %%xmm0 \n\t"
            "movaps      %1, %%xmm1 \n\t"
            "xorps   %%xmm2, %%xmm2 \n\t"
            "xorps   %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps   %%xmm2, %%xmm1 \n\t"
            "movaps  %%xmm3, %%xmm4 \n\t"
            "andps   %%xmm1, %%xmm3 \n\t"
            "andnps  %%xmm1, %%xmm4 \n\t"
            "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps  %%xmm3, %1     \n\t"
            "movaps  %%xmm0, %0     \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}

static void vector_fmul_3dnow(float *dst, const float *src, int len){
    x86_reg i = (len-4)*4;
    asm volatile(
        "1:                     \n\t"
        "movq    (%1,%0), %%mm0 \n\t"
        "movq   8(%1,%0), %%mm1 \n\t"
        "pfmul   (%2,%0), %%mm0 \n\t"
        "pfmul  8(%2,%0), %%mm1 \n\t"
        "movq   %%mm0,  (%1,%0) \n\t"
        "movq   %%mm1, 8(%1,%0) \n\t"
        "sub  $16, %0           \n\t"
        "jge  1b                \n\t"
        "femms                  \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
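
/* C equivalent of the vector_fmul variants (a sketch):
 *     for (i = 0; i < len; i++) dst[i] *= src[i];
 * The asm keeps a byte offset in %0 and walks it down from the end of the
 * arrays, handling 4 floats per iteration here and 8 in the SSE version
 * below. */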

static void vector_fmul_sse(float *dst, const float *src, int len){
    x86_reg i = (len-8)*4;
    asm volatile(
        "1:                        \n\t"
        "movaps    (%1,%0), %%xmm0 \n\t"
        "movaps  16(%1,%0), %%xmm1 \n\t"
        "mulps     (%2,%0), %%xmm0 \n\t"
        "mulps   16(%2,%0), %%xmm1 \n\t"
        "movaps  %%xmm0,   (%1,%0) \n\t"
        "movaps  %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0              \n\t"
        "jge  1b                   \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}

static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    asm volatile(
        "1:                    \n\t"
        "pswapd   8(%1), %%mm0 \n\t"
        "pswapd    (%1), %%mm1 \n\t"
        "pfmul  (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%2,%0) \n\t"
        "movq  %%mm1, 8(%2,%0) \n\t"
        "add   $16, %1         \n\t"
        "sub   $16, %0         \n\t"
        "jge   1b              \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    asm volatile("femms");
}

static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    asm volatile(
        "1:                           \n\t"
        "movaps        16(%1), %%xmm0 \n\t"
        "movaps          (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps        (%3,%0), %%xmm0 \n\t"
        "mulps      16(%3,%0), %%xmm1 \n\t"
        "movaps     %%xmm0,   (%2,%0) \n\t"
        "movaps     %%xmm1, 16(%2,%0) \n\t"
        "add    $32, %1               \n\t"
        "sub    $32, %0               \n\t"
        "jge    1b                    \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}
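
/* Both reverse variants compute, in effect:
 *     for (i = 0; i < len; i++) dst[i] = src0[i] * src1[len-1-i];
 * src1 is read forwards while dst/src0 are indexed from the end; pswapd
 * swaps the two floats of an MMX register and shufps $0x1b reverses the
 * four lanes of an XMM register to get the element order right. */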

static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
                                      const float *src2, int src3, int len, int step){
    x86_reg i = (len-4)*4;
    if(step == 2 && src3 == 0){
        dst += (len-4)*2;
        asm volatile(
            "1:                     \n\t"
            "movq    (%2,%0), %%mm0 \n\t"
            "movq   8(%2,%0), %%mm1 \n\t"
            "pfmul   (%3,%0), %%mm0 \n\t"
            "pfmul  8(%3,%0), %%mm1 \n\t"
            "pfadd   (%4,%0), %%mm0 \n\t"
            "pfadd  8(%4,%0), %%mm1 \n\t"
            "movd   %%mm0,    (%1)  \n\t"
            "movd   %%mm1,  16(%1)  \n\t"
            "psrlq  $32,      %%mm0 \n\t"
            "psrlq  $32,      %%mm1 \n\t"
            "movd   %%mm0,   8(%1)  \n\t"
            "movd   %%mm1,  24(%1)  \n\t"
            "sub  $32, %1           \n\t"
            "sub  $16, %0           \n\t"
            "jge  1b                \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        asm volatile(
            "1:                     \n\t"
            "movq    (%2,%0), %%mm0 \n\t"
            "movq   8(%2,%0), %%mm1 \n\t"
            "pfmul   (%3,%0), %%mm0 \n\t"
            "pfmul  8(%3,%0), %%mm1 \n\t"
            "pfadd   (%4,%0), %%mm0 \n\t"
            "pfadd  8(%4,%0), %%mm1 \n\t"
            "movq   %%mm0,  (%1,%0) \n\t"
            "movq   %%mm1, 8(%1,%0) \n\t"
            "sub  $16, %0           \n\t"
            "jge  1b                \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
    asm volatile("femms");
}

static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
                                    const float *src2, int src3, int len, int step){
    x86_reg i = (len-8)*4;
    if(step == 2 && src3 == 0){
        dst += (len-8)*2;
        asm volatile(
            "1:                           \n\t"
            "movaps   (%2,%0), %%xmm0     \n\t"
            "movaps 16(%2,%0), %%xmm1     \n\t"
            "mulps    (%3,%0), %%xmm0     \n\t"
            "mulps  16(%3,%0), %%xmm1     \n\t"
            "addps    (%4,%0), %%xmm0     \n\t"
            "addps  16(%4,%0), %%xmm1     \n\t"
            "movss  %%xmm0,   (%1)        \n\t"
            "movss  %%xmm1, 32(%1)        \n\t"
            "movhlps %%xmm0, %%xmm2       \n\t"
            "movhlps %%xmm1, %%xmm3       \n\t"
            "movss  %%xmm2, 16(%1)        \n\t"
            "movss  %%xmm3, 48(%1)        \n\t"
            "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
            "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
            "movss  %%xmm0,  8(%1)        \n\t"
            "movss  %%xmm1, 40(%1)        \n\t"
            "movhlps %%xmm0, %%xmm2       \n\t"
            "movhlps %%xmm1, %%xmm3       \n\t"
            "movss  %%xmm2, 24(%1)        \n\t"
            "movss  %%xmm3, 56(%1)        \n\t"
            "sub  $64, %1                 \n\t"
            "sub  $32, %0                 \n\t"
            "jge  1b                      \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        asm volatile(
            "1:                       \n\t"
            "movaps   (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps    (%3,%0), %%xmm0 \n\t"
            "mulps  16(%3,%0), %%xmm1 \n\t"
            "addps    (%4,%0), %%xmm0 \n\t"
            "addps  16(%4,%0), %%xmm1 \n\t"
            "movaps %%xmm0,   (%1,%0) \n\t"
            "movaps %%xmm1, 16(%1,%0) \n\t"
            "sub  $32, %0             \n\t"
            "jge  1b                  \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
}
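
/* Reference behaviour of both vector_fmul_add_add variants (what
 * ff_vector_fmul_add_add_c computes):
 *     for (i = 0; i < len; i++)
 *         dst[i*step] = src0[i]*src1[i] + src2[i] + src3;
 * Only step==1 and step==2 with src3==0 are vectorized above; every other
 * combination falls through to the C version. */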

static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
    // not bit-exact: pf2id uses a different rounding mode than the C and SSE versions
    int i;
    for(i=0; i<len; i+=4) {
        asm volatile(
            "pf2id       %1, %%mm0 \n\t"
            "pf2id       %2, %%mm1 \n\t"
            "packssdw %%mm1, %%mm0 \n\t"
            "movq     %%mm0, %0    \n\t"
            :"=m"(dst[i])
            :"m"(src[i]), "m"(src[i+2])
        );
    }
    asm volatile("femms");
}

static void float_to_int16_sse(int16_t *dst, const float *src, int len){
    int i;
    for(i=0; i<len; i+=4) {
        asm volatile(
            "cvtps2pi    %1, %%mm0 \n\t"
            "cvtps2pi    %2, %%mm1 \n\t"
            "packssdw %%mm1, %%mm0 \n\t"
            "movq     %%mm0, %0    \n\t"
            :"=m"(dst[i])
            :"m"(src[i]), "m"(src[i+2])
        );
    }
    asm volatile("emms");
}
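
/* cvtps2pi writes to MMX registers, which is why even the SSE version ends
 * with emms. It rounds according to MXCSR (round-to-nearest by default),
 * while pf2id truncates toward zero, hence the bit-exactness caveat above;
 * in both versions packssdw provides the saturation to the int16_t range. */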

extern void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
extern void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
extern void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
extern void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
extern void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t **block, int b_w, int b_h,
                                          int src_x, int src_y, int src_stride, slice_buffer *sb, int add, uint8_t *dst8);
extern void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t **block, int b_w, int b_h,
                                         int src_x, int src_y, int src_stride, slice_buffer *sb, int add, uint8_t *dst8);

void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & MM_MMXEXT)
        av_log(avctx, AV_LOG_INFO, " mmxext");
    if (mm_flags & MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif
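
    /* The dsp_mask handling above: with FF_MM_FORCE set, the low 16 bits of
     * the mask are forced on even if mm_support() did not report them;
     * otherwise the masked flags are forced off. This exists for
     * benchmarking and for overriding broken CPU detection. */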

    if (mm_flags & MM_MMX) {
        const int idct_algo= avctx->idct_algo;

        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
            }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER) &&
                     idct_algo==FF_IDCT_VP3 &&
                     avctx->codec->id!=CODEC_ID_THEORA &&
                     !(avctx->flags & CODEC_FLAG_BITEXACT)){
                if(mm_flags & MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    ff_vp3_dsp_init_mmx();
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & MM_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
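
        /* pixels_tab layout: the first index selects the block width
         * (0: 16 pixels, 1: 8 pixels) and the second the half-pel case
         * (0: aligned, 1: x half-pel, 2: y half-pel, 3: xy half-pel),
         * matching the _x2/_y2/_xy2 suffixes above. */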

        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (ENABLE_ANY_H263) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
        c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;
        if (mm_flags & MM_SSE2)
            c->h264_idct8_add= ff_h264_idct8_add_sse2;

        if (mm_flags & MM_MMXEXT) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
            }

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
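
            /* mcXY encodes the quarter-pel position: X is the horizontal and
             * Y the vertical quarter-pel offset, so the 16 table entries
             * cover the full 4x4 grid of subpel positions (mc00 being the
             * integer position). */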

            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;

            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (ENABLE_CAVS_DECODER)
                ff_cavsdsp_init_mmx2(c, avctx);

            if (ENABLE_VC1_DECODER || ENABLE_WMV3_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;

        } else if (mm_flags & MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;

            if (ENABLE_CAVS_DECODER)
                ff_cavsdsp_init_3dnow(c, avctx);
        }

#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;

        if((mm_flags & MM_SSE2) && !(mm_flags & MM_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
/* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
*/
            H264_QPEL_FUNCS(0, 0, sse2);
        }

        if(mm_flags & MM_SSE2){
            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }

#ifdef HAVE_SSSE3
        if(mm_flags & MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
        }
#endif

#ifdef CONFIG_SNOW_DECODER
        if(mm_flags & MM_SSE2 & 0){
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#ifdef HAVE_7REGS
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
#endif
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            if(mm_flags & MM_MMXEXT){
                c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#ifdef HAVE_7REGS
                c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
#endif
            }
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif
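
        /* Note: the "& 0" above deliberately disables the SSE2 Snow path
         * while keeping the code compiling; only the MMX/MMX2 variants are
         * actually installed. */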

        if(mm_flags & MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT))
                c->float_to_int16 = float_to_int16_3dnow;
        }
        if(mm_flags & MM_3DNOWEXT)
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
        if(mm_flags & MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->vector_fmul = vector_fmul_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
        }
        if(mm_flags & MM_3DNOW)
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
    }

    if (ENABLE_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;