/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "libavutil/cpu.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "dsputil_mmx.h"
#include "libavcodec/vc1dsp.h"

#if HAVE_INLINE_ASM

#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"
/** Add the rounder in mm7 to mm3 and mm4, then shift both right by SHIFT */
#define NORMALIZE_MMX(SHIFT)                                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */      \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */      \
     "psraw     "SHIFT", %%mm3         \n\t"                    \
     "psraw     "SHIFT", %%mm4         \n\t"
#define TRANSFER_DO_PACK(OP)                    \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     OP((%2), %%mm3)                            \
     "movq      %%mm3, (%2)            \n\t"
#define TRANSFER_DONT_PACK(OP)                  \
     OP(0(%2), %%mm3)                           \
     OP(8(%2), %%mm4)                           \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"
/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)
/** Load the rounder 32-r or 8-r and broadcast it to all words of mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"
#define SHIFT2_LINE(OFF, R0,R1,R2,R3)           \
    "paddw     %%mm"#R2", %%mm"#R1"    \n\t"    \
    "movd      (%0,%3), %%mm"#R0"      \n\t"    \
    "pmullw    %%mm6, %%mm"#R1"        \n\t"    \
    "punpcklbw %%mm0, %%mm"#R0"        \n\t"    \
    "movd      (%0,%2), %%mm"#R3"      \n\t"    \
    "psubw     %%mm"#R0", %%mm"#R1"    \n\t"    \
    "punpcklbw %%mm0, %%mm"#R3"        \n\t"    \
    "paddw     %%mm7, %%mm"#R1"        \n\t"    \
    "psubw     %%mm"#R3", %%mm"#R1"    \n\t"    \
    "psraw     %4, %%mm"#R1"           \n\t"    \
    "movq      %%mm"#R1", "#OFF"(%1)   \n\t"    \
    "add       %2, %0                  \n\t"
/** Sacrificing mm6 makes it possible to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, x86_reg stride,
                                       int rnd, int64_t shift)
{
    __asm__ volatile(
        "mov       $3, %%"REG_c"           \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6 \n\t"
        "1:                                \n\t"
        "movd      (%0), %%mm2             \n\t"
        "add       %2, %0                  \n\t"
        "movd      (%0), %%mm3             \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub       %6, %0                  \n\t"
        "add       $8, %1                  \n\t"
        "dec       %%"REG_c"               \n\t"
        "jnz 1b                            \n\t"
        : "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride),
          "m"(shift), "m"(rnd), "r"(9*stride-4)
        : "%"REG_c, "memory"
    );
}
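
/* A scalar sketch of the filter computed above (illustrative only): each
 * 16-bit intermediate written to dst is the vertical 1/2-pel tap set
 * -1,9,9,-1 applied without clipping,
 *
 *     static inline int16_t ver_shift2(const uint8_t *s, int stride,
 *                                      int r, int shift)
 *     {
 *         return (9 * (s[0] + s[stride])
 *                 - s[-stride] - s[2 * stride] + r) >> shift;
 *     }
 *
 * where r is the pre-biased rounder the caller loads into mm7. */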
/**
 * Data is already unpacked, so some operations can directly be made from
 * memory.
 */
#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
                                             const int16_t *src, int rnd)\
{\
    int h = 8;\
\
    src -= 1;\
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
    __asm__ volatile(\
        LOAD_ROUNDER_MMX("%4")\
        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"\
        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"\
        "1:                                \n\t"\
        "movq      2*0+0(%1), %%mm1        \n\t"\
        "movq      2*0+8(%1), %%mm2        \n\t"\
        "movq      2*1+0(%1), %%mm3        \n\t"\
        "movq      2*1+8(%1), %%mm4        \n\t"\
        "paddw     2*3+0(%1), %%mm1        \n\t"\
        "paddw     2*3+8(%1), %%mm2        \n\t"\
        "paddw     2*2+0(%1), %%mm3        \n\t"\
        "paddw     2*2+8(%1), %%mm4        \n\t"\
        "pmullw    %%mm5, %%mm3            \n\t"\
        "pmullw    %%mm5, %%mm4            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t"\
        "psubw     %%mm2, %%mm4            \n\t"\
        NORMALIZE_MMX("$7")\
        /* Remove bias */\
        "paddw     %%mm6, %%mm3            \n\t"\
        "paddw     %%mm6, %%mm4            \n\t"\
        TRANSFER_DO_PACK(OP)\
        "add       $24, %1                 \n\t"\
        "add       %3, %2                  \n\t"\
        "dec       %0                      \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(h), "+r" (src),  "+r" (dst)\
        : "r"(stride), "m"(rnd)\
        : "memory"\
    );\
}
VC1_HOR_16b_SHIFT2(OP_PUT, put_)
VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
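
/* A scalar sketch (illustrative only): ignoring the bias trick, each output
 * byte of this horizontal pass is
 *
 *     static inline uint8_t hor_shift2(const int16_t *t, int rnd)
 *     {
 *         int v = (9 * (t[1] + t[2]) - t[0] - t[3] + rnd) >> 7;
 *         return av_clip_uint8(v);
 *     }
 *
 * The asm instead subtracts (-1+9+9-1)*1024 from rnd up front and re-adds
 * 128 (ff_pw_128) after the shift: 16*1024 == 128 << 7, so the result is
 * identical, but the pre-shift value stays within a signed 16-bit word and
 * packuswb can do the final clipping. */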
/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov       $8, %%"REG_c"           \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1:                                \n\t"\
        "movd      0(%0   ), %%mm3         \n\t"\
        "movd      4(%0   ), %%mm4         \n\t"\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "add       %2, %0                  \n\t"\
        "punpcklbw %%mm0, %%mm3            \n\t"\
        "punpcklbw %%mm0, %%mm4            \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "paddw     %%mm1, %%mm3            \n\t"\
        "paddw     %%mm2, %%mm4            \n\t"\
        "movd      0(%0,%3), %%mm1         \n\t"\
        "movd      4(%0,%3), %%mm2         \n\t"\
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/\
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/\
        NORMALIZE_MMX("$4")\
        "packuswb  %%mm4, %%mm3            \n\t"\
        OP((%1), %%mm3)\
        "movq      %%mm3, (%1)             \n\t"\
        "add       %6, %0                  \n\t"\
        "add       %4, %1                  \n\t"\
        "dec       %%"REG_c"               \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(src),  "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "r"(stride-offset)\
        : "%"REG_c, "memory"\
    );\
}
VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)
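
/* A scalar sketch (illustrative only): with offset = 1 this is the
 * horizontal, and with offset = stride the vertical, 1/2-pel filter:
 *
 *     static inline uint8_t shift2(const uint8_t *s, ptrdiff_t o, int rnd)
 *     {
 *         // taps -1,9,9,-1; the function pre-computes rnd = 8 - rnd
 *         return av_clip_uint8((9 * (s[0] + s[o])
 *                               - s[-o] - s[2 * o] + 8 - rnd) >> 4);
 *     }
 */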
/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ    "movd 1" for packed bytes, or "movq 2" if the data read
 *                is already unpacked to words.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3            \n\t" /* *18 */         \
     "pmullw    %%mm6, %%mm4            \n\t" /* *18 */         \
     "psubw     %%mm1, %%mm3            \n\t" /* 18,-3 */       \
     "psubw     %%mm2, %%mm4            \n\t" /* 18,-3 */       \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1               \n\t" /* 4* */          \
     "psllw     $2, %%mm2               \n\t" /* 4* */          \
     "psubw     %%mm1, %%mm3            \n\t" /* -4,18,-3 */    \
     "psubw     %%mm2, %%mm4            \n\t" /* -4,18,-3 */    \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1            \n\t" /* *53 */         \
     "pmullw    %%mm5, %%mm2            \n\t" /* *53 */         \
     "paddw     %%mm1, %%mm3            \n\t" /* 4,53,18,-3 */  \
     "paddw     %%mm2, %%mm4            \n\t" /* 4,53,18,-3 */
/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either shift1 or shift3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)            \
static void                                                     \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,   \
                                 x86_reg src_stride,            \
                                 int rnd, int64_t shift)        \
{                                                               \
    int h = 8;                                                  \
    src -= src_stride;                                          \
    __asm__ volatile(                                           \
        LOAD_ROUNDER_MMX("%5")                                  \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"               \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"               \
        ".p2align 3                        \n\t"                \
        "1:                                \n\t"                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)\
        NORMALIZE_MMX("%6")                                     \
        TRANSFER_DONT_PACK(OP_PUT)                              \
        /* Last 3 (in fact 4) bytes on the line */              \
        "movd      8+"A1", %%mm1           \n\t"                \
        DO_UNPACK("%%mm1")                                      \
        "movq      %%mm1, %%mm3            \n\t"                \
        "paddw     %%mm1, %%mm1            \n\t"                \
        "paddw     %%mm3, %%mm1            \n\t" /* 3* */       \
        "movd      8+"A2", %%mm3           \n\t"                \
        DO_UNPACK("%%mm3")                                      \
        "pmullw    %%mm6, %%mm3            \n\t" /* *18 */      \
        "psubw     %%mm1, %%mm3            \n\t" /* 18,-3 */    \
        "movd      8+"A3", %%mm1           \n\t"                \
        DO_UNPACK("%%mm1")                                      \
        "pmullw    %%mm5, %%mm1            \n\t" /* *53 */      \
        "paddw     %%mm1, %%mm3            \n\t" /* 53,18,-3 */ \
        "movd      8+"A4", %%mm1           \n\t"                \
        DO_UNPACK("%%mm1")                                      \
        "psllw     $2, %%mm1               \n\t" /* 4* */       \
        "psubw     %%mm1, %%mm3            \n\t"                \
        "paddw     %%mm7, %%mm3            \n\t"                \
        "psraw     %6, %%mm3               \n\t"                \
        "movq      %%mm3, 16(%2)           \n\t"                \
        "add       %3, %1                  \n\t"                \
        "add       $24, %2                 \n\t"                \
        "dec       %0                      \n\t"                \
        "jnz 1b                            \n\t"                \
        : "+r"(h), "+r" (src),  "+r" (dst)                      \
        : "r"(src_stride), "r"(3*src_stride),                   \
          "m"(rnd), "m"(shift)                                  \
        : "memory"                                              \
    );                                                          \
}
/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here the offset is in 16-bit words, so the parameters passed as A1 to A4
 * can be simple.
 *
 * @param  NAME   Either shift1 or shift3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)\
static void                                                     \
OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,  \
                                       const int16_t *src, int rnd)   \
{                                                               \
    int h = 8;                                                  \
    src -= 1;                                                   \
    rnd -= (-4+58+13-3)*256; /* Add -256 bias */                \
    __asm__ volatile(                                           \
        LOAD_ROUNDER_MMX("%4")                                  \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"            \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"            \
        ".p2align 3                        \n\t"                \
        "1:                                \n\t"                \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)\
        NORMALIZE_MMX("$7")                                     \
        /* Remove bias */                                       \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"            \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"            \
        TRANSFER_DO_PACK(OP)                                    \
        "add       $24, %1                 \n\t"                \
        "add       %3, %2                  \n\t"                \
        "dec       %0                      \n\t"                \
        "jnz 1b                            \n\t"                \
        : "+r"(h), "+r" (src),  "+r" (dst)                      \
        : "r"(stride), "m"(rnd)                                 \
        : "memory"                                              \
    );                                                          \
}
/**
 * Macro to build the 8-bit, any direction, version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either shift1 or shift3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)     \
static void                                                     \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,       \
                              x86_reg stride, int rnd, x86_reg offset)\
{                                                               \
    int h = 8;                                                  \
    src -= offset;                                              \
    rnd = 32-rnd;                                               \
    __asm__ volatile (                                          \
        LOAD_ROUNDER_MMX("%6")                                  \
        "movq      "MANGLE(ff_pw_53)", %%mm5       \n\t"        \
        "movq      "MANGLE(ff_pw_18)", %%mm6       \n\t"        \
        ".p2align 3                        \n\t"                \
        "1:                                \n\t"                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)\
        NORMALIZE_MMX("$6")                                     \
        TRANSFER_DO_PACK(OP)                                    \
        "add       %5, %1                  \n\t"                \
        "add       %5, %2                  \n\t"                \
        "dec       %0                      \n\t"                \
        "jnz 1b                            \n\t"                \
        : "+r"(h), "+r" (src),  "+r" (dst)                      \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)     \
        : "memory"                                              \
    );                                                          \
}
/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)
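
/* Note that shift1 and shift3 pass the same tap addresses in opposite
 * orders, so the 1/4-pel weights -4,53,18,-3 and the mirrored 3/4-pel
 * weights -3,18,53,-4 both come out of MSPEL_FILTER13_CORE unchanged. */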
typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src,
                                               x86_reg src_stride,
                                               int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride,
                                               const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src,
                                          x86_reg stride, int rnd,
                                          x86_reg offset);
/**
 * Interpolate fractional pel values by applying proper vertical then
 * horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter pixels shift).
 * @param  vmode   Vertical filter (expressed in quarter pixels shift).
 * @param  rnd     Rounding bias.
 */
#define VC1_MSPEL_MC(OP)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0                 \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int              r;\
            DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
}

VC1_MSPEL_MC(put_)
VC1_MSPEL_MC(avg_)
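
/* A worked example of the dispatch above (illustrative only): for
 * hmode = 1, vmode = 2 it runs the vertical 1/2-pel filter first with
 * shift = (shift_value[1] + shift_value[2]) >> 1 = (5 + 1) >> 1 = 3 and
 * rounder r = (1 << 2) + rnd - 1, writing 8 rows of 12 int16_t into tmp,
 * then applies the horizontal 1/4-pel filter from tmp+1 to produce dst:
 *
 *     put_vc1_mspel_mc(dst, src, stride, 1, 2, rnd);
 */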
/** Macro to ease bicubic filter interpolation functions declarations */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}                                                                       \
static void avg_vc1_mspel_mc ## a ## b ## _mmxext(uint8_t *dst,         \
                                                  const uint8_t *src,   \
                                                  int stride, int rnd)  \
{                                                                       \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}
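
/* Illustration only: DECLARE_FUNCTION(1, 2) expands to the pair
 * put_vc1_mspel_mc12_mmx()/avg_vc1_mspel_mc12_mmxext(), each simply
 * forwarding to the generic dispatcher:
 *
 *     put_vc1_mspel_mc(dst, src, stride, 1, 2, rnd);
 */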
DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)
static void vc1_inv_trans_4x4_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0 \n\t"
        "movd       %%mm3, %1 \n\t"
        "movd       %%mm4, %2 \n\t"
        "movd       %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}
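
/* A scalar sketch of the DC-only inverse transform above (illustrative
 * only). mm0 holds max(dc,0) and mm1 holds max(-dc,0), both packed to
 * bytes, so paddusb/psubusb implement a signed add with unsigned-saturating
 * instructions:
 *
 *     static void inv_trans_4x4_dc_c(uint8_t *dest, int linesize,
 *                                    int16_t *block)
 *     {
 *         int dc = block[0];
 *         dc = (17 * dc +  4) >> 3;
 *         dc = (17 * dc + 64) >> 7;
 *         for (int i = 0; i < 4; i++)
 *             for (int j = 0; j < 4; j++)
 *                 dest[i * linesize + j] =
 *                     av_clip_uint8(dest[i * linesize + j] + dc);
 *     }
 *
 * The three variants below differ only in the block size and the two
 * scaling steps applied to dc. */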
static void vc1_inv_trans_4x8_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0 \n\t"
        "movd       %%mm3, %1 \n\t"
        "movd       %%mm4, %2 \n\t"
        "movd       %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0 \n\t"
        "movd       %%mm3, %1 \n\t"
        "movd       %%mm4, %2 \n\t"
        "movd       %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}
static void vc1_inv_trans_8x4_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0 \n\t"
        "movq       %%mm3, %1 \n\t"
        "movq       %%mm4, %2 \n\t"
        "movq       %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}
static void vc1_inv_trans_8x8_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0 \n\t"
        "movq       %%mm3, %1 \n\t"
        "movq       %%mm4, %2 \n\t"
        "movq       %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0 \n\t"
        "movq       %%mm3, %1 \n\t"
        "movq       %%mm4, %2 \n\t"
        "movq       %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}
av_cold void ff_vc1dsp_init_mmx(VC1DSPContext *dsp)
{
    dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
}
av_cold void ff_vc1dsp_init_mmxext(VC1DSPContext *dsp)
{
    dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmxext;

    dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmxext;

    dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmxext;

    dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmxext;

    dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmxext;
    dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmxext;
    dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmxext;
    dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmxext;
}
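
/* A sketch of how these init functions are typically selected (illustrative;
 * the actual dispatch lives in the shared VC-1 DSP init code):
 *
 *     int cpu_flags = av_get_cpu_flags();
 *     if (INLINE_MMX(cpu_flags))
 *         ff_vc1dsp_init_mmx(dsp);
 *     if (INLINE_MMXEXT(cpu_flags))
 *         ff_vc1dsp_init_mmxext(dsp);
 */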
#endif /* HAVE_INLINE_ASM */