/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "libavutil/cpu.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vc1dsp.h"
#include "constants.h"
#if HAVE_INLINE_ASM

#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"
/** Add rounder from mm7 to mm3 and pack result at destination */
#define NORMALIZE_MMX(SHIFT)                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */ \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */ \
     "psraw     "SHIFT", %%mm3         \n\t"    \
     "psraw     "SHIFT", %%mm4         \n\t"
#define TRANSFER_DO_PACK(OP)                    \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     OP((%2), %%mm3)                            \
     "movq      %%mm3, (%2)            \n\t"
#define TRANSFER_DONT_PACK(OP)                  \
     OP(0(%2), %%mm3)                           \
     OP(8(%2), %%mm4)                           \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"
/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)   "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)
/** Compute the rounder 32-r or 8-r and unpack it to mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"
#define SHIFT2_LINE(OFF, R0,R1,R2,R3)           \
    "paddw     %%mm"#R2", %%mm"#R1"    \n\t"    \
    "movd      (%0,%3), %%mm"#R0"      \n\t"    \
    "pmullw    %%mm6, %%mm"#R1"        \n\t"    \
    "punpcklbw %%mm0, %%mm"#R0"        \n\t"    \
    "movd      (%0,%2), %%mm"#R3"      \n\t"    \
    "psubw     %%mm"#R0", %%mm"#R1"    \n\t"    \
    "punpcklbw %%mm0, %%mm"#R3"        \n\t"    \
    "paddw     %%mm7, %%mm"#R1"        \n\t"    \
    "psubw     %%mm"#R3", %%mm"#R1"    \n\t"    \
    "psraw     %4, %%mm"#R1"           \n\t"    \
    "movq      %%mm"#R1", "#OFF"(%1)   \n\t"    \
    "add       %2, %0                  \n\t"
/** Sacrificing mm6 makes it possible to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, x86_reg stride,
                                       int rnd, int64_t shift)
{
    __asm__ volatile(
        "mov       $3, %%"FF_REG_c"           \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6   \n\t"
        "1:                                   \n\t"
        "movd      (%0), %%mm2                \n\t"
        "add       %2, %0                     \n\t"
        "movd      (%0), %%mm3                \n\t"
        "punpcklbw %%mm0, %%mm2               \n\t"
        "punpcklbw %%mm0, %%mm3               \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub       %6, %0                     \n\t"
        "add       $8, %1                     \n\t"
        "dec       %%"FF_REG_c"               \n\t"
        "jnz 1b                               \n\t"
        : "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride),
          "m"(shift), "m"(rnd), "r"(9*stride-4)
        : "%"FF_REG_c, "memory"
    );
}
/**
 * Data is already unpacked, so some operations can directly be made from
 * memory.
 */
#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
                                             const int16_t *src, int rnd)\
{\
    int h = 8;\
\
    src -= 1;\
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
    __asm__ volatile(\
        LOAD_ROUNDER_MMX("%4")\
        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"\
        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"\
        "1:                                 \n\t"\
        "movq      2*0+0(%1), %%mm1         \n\t"\
        "movq      2*0+8(%1), %%mm2         \n\t"\
        "movq      2*1+0(%1), %%mm3         \n\t"\
        "movq      2*1+8(%1), %%mm4         \n\t"\
        "paddw     2*3+0(%1), %%mm1         \n\t"\
        "paddw     2*3+8(%1), %%mm2         \n\t"\
        "paddw     2*2+0(%1), %%mm3         \n\t"\
        "paddw     2*2+8(%1), %%mm4         \n\t"\
        "pmullw    %%mm5, %%mm3             \n\t"\
        "pmullw    %%mm5, %%mm4             \n\t"\
        "psubw     %%mm1, %%mm3             \n\t"\
        "psubw     %%mm2, %%mm4             \n\t"\
        NORMALIZE_MMX("$7")\
        /* Remove bias */\
        "paddw     %%mm6, %%mm3             \n\t"\
        "paddw     %%mm6, %%mm4             \n\t"\
        TRANSFER_DO_PACK(OP)\
        "add       $24, %1                  \n\t"\
        "add       %3, %2                   \n\t"\
        "dec       %0                       \n\t"\
        "jnz 1b                             \n\t"\
        : "+r"(h), "+r" (src),  "+r" (dst)\
        : "r"(stride), "m"(rnd)\
        : "memory"\
    );\
}
VC1_HOR_16b_SHIFT2(OP_PUT, put_)
VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
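
/* Scalar reference sketch (illustrative, not from the original) of the put_
 * variant generated above: the horizontal 1/2-shift second pass reads the
 * 16-bit intermediates, applies (-1, 9, 9, -1), removes the bias carried
 * over from the first pass, shifts by 7 and saturates to 8 bits.  The avg_
 * variant additionally averages the result with dst (pavgb). */
static inline void vc1_hor_16b_shift2_ref(uint8_t *dst, int stride,
                                          const int16_t *src, int rnd)
{
    rnd -= (-1 + 9 + 9 - 1) * 1024;          /* same bias removal as above */
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int v = -src[x - 1] + 9 * src[x] + 9 * src[x + 1] - src[x + 2];
            v = ((v + rnd) >> 7) + 128;      /* undo the offset kept in tmp */
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        src += 12;                           /* 16-bit tmp rows are 12 wide */
        dst += stride;
    }
}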
/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for *9 factor.
 */
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov       $8, %%"FF_REG_c"        \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1:                                \n\t"\
        "movd      0(%0   ), %%mm3         \n\t"\
        "movd      4(%0   ), %%mm4         \n\t"\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "add       %2, %0                  \n\t"\
        "punpcklbw %%mm0, %%mm3            \n\t"\
        "punpcklbw %%mm0, %%mm4            \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "paddw     %%mm1, %%mm3            \n\t"\
        "paddw     %%mm2, %%mm4            \n\t"\
        "movd      0(%0,%3), %%mm1         \n\t"\
        "movd      4(%0,%3), %%mm2         \n\t"\
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/\
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/\
        NORMALIZE_MMX("$4")\
        "packuswb  %%mm4, %%mm3            \n\t"\
        OP((%1), %%mm3)\
        "movq      %%mm3, (%1)             \n\t"\
        "add       %6, %0                  \n\t"\
        "add       %4, %1                  \n\t"\
        "dec       %%"FF_REG_c"            \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(src),  "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "g"(stride-offset)\
        : "%"FF_REG_c, "memory"\
    );\
}
VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)
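
/* Scalar reference sketch (illustrative, not from the original) of the put_
 * variant generated above: a single-pass 1/2-pel filter working directly on
 * 8-bit pixels, with 'offset' selecting the direction (1 = horizontal,
 * stride = vertical).  The avg_ variant averages the result with dst. */
static inline void vc1_shift2_ref(uint8_t *dst, const uint8_t *src,
                                  int stride, int rnd, int offset)
{
    const int r = 8 - rnd;                   /* rounder used by the asm */
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int v = 9 * (src[x] + src[x + offset])
                    - src[x - offset] - src[x + 2 * offset];
            v = (v + r) >> 4;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        src += stride;
        dst += stride;
    }
}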
/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ    "movd 1" when reading packed 8-bit data (pair it with
 *                DO_UNPACK), or "movq 2" if the data read is already unpacked.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* 4,53,18,-3 */          \
     "paddw     %%mm2, %%mm4    \n\t" /* 4,53,18,-3 */
/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either shift1 or shift3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,      \
                                 x86_reg src_stride,                    \
                                 int rnd, int64_t shift)                \
{                                                                       \
    int h = 8;                                                          \
    src -= src_stride;                                                  \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%5")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"                       \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"                       \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)        \
        NORMALIZE_MMX("%6")                                             \
        TRANSFER_DONT_PACK(OP_PUT)                                      \
        /* Last 3 (in fact 4) bytes on the line */                      \
        "movd      8+"A1", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "movq      %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm1, %%mm1    \n\t"                                \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */                       \
        "movd      8+"A2", %%mm3   \n\t"                                \
        DO_UNPACK("%%mm3")                                              \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                      \
        "psubw     %%mm1, %%mm3    \n\t" /*18,-3 */                     \
        "movd      8+"A3", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                      \
        "paddw     %%mm1, %%mm3    \n\t" /*53,18,-3 */                  \
        "movd      8+"A4", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "psllw     $2, %%mm1       \n\t" /* 4* */                       \
        "psubw     %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm7, %%mm3    \n\t"                                \
        "psraw     %6, %%mm3       \n\t"                                \
        "movq      %%mm3, 16(%2)   \n\t"                                \
        "add       %3, %1          \n\t"                                \
        "add       $24, %2         \n\t"                                \
        "dec       %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(src_stride), "r"(3*src_stride),                           \
          "m"(rnd), "m"(shift)                                          \
        : "memory"                                                      \
    );                                                                  \
}
/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here the data is 16 bits wide, so the parameters passed as A1 to A4 should
 * be simple constant offsets.
 *
 * @param  NAME   Either shift1 or shift3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)        \
static void OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride, \
                                                   const int16_t *src, int rnd) \
{                                                                       \
    int h = 8;                                                          \
    src -= 1;                                                           \
    rnd -= (-4+58+13-3)*256; /* Add -256 bias */                        \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%4")                                          \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"                    \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"                    \
        "1:                                    \n\t"                    \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$7")                                             \
        /* Remove bias */                                               \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"                    \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"                    \
        TRANSFER_DO_PACK(OP)                                            \
        "add       $24, %1                     \n\t"                    \
        "add       %3, %2                      \n\t"                    \
        "dec       %0                          \n\t"                    \
        "jnz 1b                                \n\t"                    \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(stride), "m"(rnd)                                         \
        : "memory"                                                      \
    );                                                                  \
}
/**
 * Macro to build the 8-bit, any direction, version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either shift1 or shift3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)             \
static void OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src, \
                                          x86_reg stride, int rnd, x86_reg offset) \
{                                                                       \
    int h = 8;                                                          \
    src -= offset;                                                      \
    rnd = 32-rnd;                                                       \
    __asm__ volatile (                                                  \
        LOAD_ROUNDER_MMX("%6")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5 \n\t"                      \
        "movq      "MANGLE(ff_pw_18)", %%mm6 \n\t"                      \
        "1:                                  \n\t"                      \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)        \
        NORMALIZE_MMX("$6")                                             \
        TRANSFER_DO_PACK(OP)                                            \
        "add       %5, %1                    \n\t"                      \
        "add       %5, %2                    \n\t"                      \
        "dec       %0                        \n\t"                      \
        "jnz 1b                              \n\t"                      \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)             \
        : "memory"                                                      \
    );                                                                  \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)
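
/* Scalar reference sketch (illustrative, not from the original) of the 8-bit
 * put_vc1_shift1_mmx generated above: the 1/4-shift bicubic with taps
 * (-4, 53, 18, -3) applied along 'offset' (1 = horizontal, stride =
 * vertical), rounded with 32-rnd and shifted by 6.  shift3 is the mirrored
 * 3/4 case: the same weights read in the opposite direction. */
static inline void vc1_shift1_ref(uint8_t *dst, const uint8_t *src,
                                  int stride, int rnd, int offset)
{
    const int r = 32 - rnd;
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int v = -4 * src[x - offset] + 53 * src[x]
                    + 18 * src[x + offset] - 3 * src[x + 2 * offset];
            v = (v + r) >> 6;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        src += stride;
        dst += stride;
    }
}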
typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst,
                                               const uint8_t *src,
                                               x86_reg src_stride,
                                               int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst,
                                               x86_reg dst_stride,
                                               const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src,
                                          x86_reg stride, int rnd,
                                          x86_reg offset);
/**
 * Interpolate fractional pel values by applying proper vertical then
 * horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter pixels shift).
 * @param  vmode   Vertical filter (expressed in quarter pixels shift).
 * @param  rnd     Rounding bias.
 */
#define VC1_MSPEL_MC(OP)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0         \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int r;\
            DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
}

VC1_MSPEL_MC(put_)
VC1_MSPEL_MC(avg_)
/** Macro to ease bicubic filter interpolation function declarations */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst,            \
                                               const uint8_t *src,      \
                                               ptrdiff_t stride,        \
                                               int rnd)                 \
{                                                                       \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}                                                                       \
static void avg_vc1_mspel_mc ## a ## b ## _mmxext(uint8_t *dst,         \
                                                  const uint8_t *src,   \
                                                  ptrdiff_t stride,     \
                                                  int rnd)              \
{                                                                       \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)
static void vc1_inv_trans_4x4_dc_mmxext(uint8_t *dest, ptrdiff_t stride,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        :: "r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t *)(dest + 0 * stride)),
         "+m"(*(uint32_t *)(dest + 1 * stride)),
         "+m"(*(uint32_t *)(dest + 2 * stride)),
         "+m"(*(uint32_t *)(dest + 3 * stride))
    );
}
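
/* Scalar sketch (illustrative, not part of the original) of the DC-only
 * 4x4 inverse transform above: the two scaling steps match the asm, and the
 * paddusb/psubusb pair with a positive and a negated copy of dc amounts to a
 * saturating add of dc to every pixel.  The other _dc functions below follow
 * the same pattern with different scale factors and block sizes. */
static inline void vc1_inv_trans_4x4_dc_ref(uint8_t *dest, ptrdiff_t stride,
                                            int16_t *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;
    for (int y = 0; y < 4; y++) {
        for (int x = 0; x < 4; x++) {
            int v = dest[x] + dc;
            dest[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        dest += stride;
    }
}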
static void vc1_inv_trans_4x8_dc_mmxext(uint8_t *dest, ptrdiff_t stride,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        :: "r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t *)(dest + 0 * stride)),
         "+m"(*(uint32_t *)(dest + 1 * stride)),
         "+m"(*(uint32_t *)(dest + 2 * stride)),
         "+m"(*(uint32_t *)(dest + 3 * stride))
    );
    dest += 4 * stride;
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t *)(dest + 0 * stride)),
         "+m"(*(uint32_t *)(dest + 1 * stride)),
         "+m"(*(uint32_t *)(dest + 2 * stride)),
         "+m"(*(uint32_t *)(dest + 3 * stride))
    );
}
static void vc1_inv_trans_8x4_dc_mmxext(uint8_t *dest, ptrdiff_t stride,
                                        int16_t *block)
{
    int dc = block[0];
    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        :: "r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t *)(dest + 0 * stride)),
         "+m"(*(uint32_t *)(dest + 1 * stride)),
         "+m"(*(uint32_t *)(dest + 2 * stride)),
         "+m"(*(uint32_t *)(dest + 3 * stride))
    );
}
static void vc1_inv_trans_8x8_dc_mmxext(uint8_t *dest, ptrdiff_t stride,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        :: "r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t *)(dest + 0 * stride)),
         "+m"(*(uint32_t *)(dest + 1 * stride)),
         "+m"(*(uint32_t *)(dest + 2 * stride)),
         "+m"(*(uint32_t *)(dest + 3 * stride))
    );
    dest += 4 * stride;
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t *)(dest + 0 * stride)),
         "+m"(*(uint32_t *)(dest + 1 * stride)),
         "+m"(*(uint32_t *)(dest + 2 * stride)),
         "+m"(*(uint32_t *)(dest + 3 * stride))
    );
}
static void put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                   ptrdiff_t stride, int rnd)
{
    ff_put_pixels8_mmx(dst, src, stride, 8);
}
av_cold void ff_vc1dsp_init_mmx(VC1DSPContext *dsp)
{
    dsp->put_vc1_mspel_pixels_tab[ 0] = put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
}
av_cold void ff_vc1dsp_init_mmxext(VC1DSPContext *dsp)
{
    dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmxext;

    dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmxext;

    dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmxext;

    dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmxext;

    dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmxext;
    dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmxext;
    dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmxext;
    dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmxext;
}
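
/* Illustrative sketch (not part of the original file) of how a caller such
 * as ff_vc1dsp_init_x86() might select these inline-asm versions from the
 * detected CPU flags; INLINE_MMX/INLINE_MMXEXT come from libavutil/x86/cpu.h.
 * The function name is made up and the real call site may differ. */
static inline void vc1dsp_init_x86_sketch(VC1DSPContext *dsp)
{
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_MMX(cpu_flags))
        ff_vc1dsp_init_mmx(dsp);
    if (INLINE_MMXEXT(cpu_flags))
        ff_vc1dsp_init_mmxext(dsp);
}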
#endif /* HAVE_INLINE_ASM */