;******************************************************************************
;* VP8 MMXEXT optimizations
;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
;* Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA
fourtap_filter_hw_m:  times 4 dw  -6, 123
sixtap_filter_hw_m:   times 4 dw   2, -11
fourtap_filter_hb_m:  times 8 db  -6, 123
sixtap_filter_hb_m:   times 8 db   2,   1
fourtap_filter_v_m:   times 8 dw  -6
sixtap_filter_v_m:    times 8 dw   2
bilinear_filter_vw_m: times 8 dw   1
bilinear_filter_vb_m: times 8 db   7,   1
%ifdef PIC
%define fourtap_filter_hw  picregq
%define sixtap_filter_hw   picregq
%define fourtap_filter_hb  picregq
%define sixtap_filter_hb   picregq
%define fourtap_filter_v   picregq
%define sixtap_filter_v    picregq
%define bilinear_filter_vw picregq
%define bilinear_filter_vb picregq
%define npicregs 1
%else
%define fourtap_filter_hw  fourtap_filter_hw_m
%define sixtap_filter_hw   sixtap_filter_hw_m
%define fourtap_filter_hb  fourtap_filter_hb_m
%define sixtap_filter_hb   sixtap_filter_hb_m
%define fourtap_filter_v   fourtap_filter_v_m
%define sixtap_filter_v    sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
%define npicregs 0
%endif
filter_h2_shuf:  db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10

filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11

pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734
;-------------------------------------------------------------------------------
; subpel MC functions:
;
; void ff_put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, ptrdiff_t deststride,
;                                                 uint8_t *src, ptrdiff_t srcstride,
;                                                 int height, int mx, int my);
;-------------------------------------------------------------------------------
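; As the filter tables above suggest, mx/my hold the subpel position (1-7):
; the odd positions have zero outer taps in the VP8 6-tap filter, so the
; "fourtap" tables and _h4/_v4 functions cover them, while positions 2/4/6
; need the full "sixtap" variants; the bilinear tables serve VP8's bilinear
; prediction filter mode.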
%macro FILTER_SSSE3 1
cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, height, mx, picreg
    mova      m3, [filter_h6_shuf2]
    mova      m4, [filter_h6_shuf3]
    lea  picregq, [sixtap_filter_hb_m]
    mova      m5, [sixtap_filter_hb+mxq*8-48] ; set up 6tap filter in bytes
    mova      m6, [sixtap_filter_hb+mxq*8-32]
    mova      m7, [sixtap_filter_hb+mxq*8-16]

    ; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
    ; shuffle with a memory operand
    punpcklbw m0, [srcq+3]
    pshufb    m0, [filter_h6_shuf1]
    pmulhrsw  m0, [pw_256]
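    ; pmulhrsw against pw_256 computes (x*256 + 0x4000) >> 15 = (x + 64) >> 7,
    ; i.e. exactly the VP8 rounding/normalization of the filtered sum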
    movh   [dstq], m0        ; store
    dec  heightd            ; next row

cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
    mova      m3, [filter_h2_shuf]
    mova      m4, [filter_h4_shuf]
    lea  picregq, [fourtap_filter_hb_m]
    mova      m5, [fourtap_filter_hb+mxq-16] ; set up 4tap filter in bytes
    mova      m6, [fourtap_filter_hb+mxq]
    movh   [dstq], m0        ; store
    dec  heightd            ; next row

cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [fourtap_filter_hb_m]
    mova      m5, [fourtap_filter_hb+myq-16]
    mova      m6, [fourtap_filter_hb+myq]
    movh      m1, [srcq+  srcstrideq]
    movh      m2, [srcq+2*srcstrideq]
    movh      m3, [srcq+2*srcstrideq]    ; read new row
    dec  heightd            ; next row

cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [sixtap_filter_hb_m]
    lea      myq, [sixtap_filter_hb+myq*8]
    movh      m1, [srcq+srcstrideq]
    movh      m2, [srcq+srcstrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
    movh      m4, [srcq+srcstrideq]
    movh      m5, [srcq+2*srcstrideq]      ; read new row
    pmaddubsw m6, [myq-48]
    pmaddubsw m1, [myq-32]
    pmaddubsw m7, [myq-16]
    pmulhrsw  m6, [pw_256]
    dec  heightd            ; next row
; 4x4 block, H-only 4-tap filter
cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [fourtap_filter_hw_m]
    movq     mm4, [fourtap_filter_hw+mxq-16] ; set up 4tap filter in words
    movq     mm5, [fourtap_filter_hw+mxq]
    movq      mm1, [srcq-1]                ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq      mm2, mm1                     ; byte ABCD..
    punpcklbw mm1, mm6                     ; byte->word ABCD
    pshufw    mm0, mm2, 9                  ; byte CDEF..
    punpcklbw mm0, mm6                     ; byte->word CDEF
    pshufw    mm3, mm1, 0x94               ; word ABBC
    pshufw    mm1, mm0, 0x94               ; word CDDE
    pmaddwd   mm3, mm4                     ; multiply 2px with F0/F1
    movq      mm0, mm1                     ; backup for second set of pixels
    pmaddwd   mm1, mm5                     ; multiply 2px with F2/F3
    paddd     mm3, mm1                     ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    punpckhbw mm2, mm6                     ; byte->word EFGH
    pmaddwd   mm0, mm4                     ; multiply backed up 2px with F0/F1
    pshufw    mm1, mm2, 0x94               ; word EFFG
    pmaddwd   mm1, mm5                     ; multiply 2px with F2/F3
    paddd     mm0, mm1                     ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw  mm3, mm0                     ; merge dword->word (4px)
    paddsw    mm3, mm7                     ; rounding
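    ; (mm7 presumably holds pw_64: VP8 normalizes the filtered sum as
    ; (sum + 64) >> 7)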
    packuswb  mm3, mm6                     ; clip and word->bytes
    movd   [dstq], mm3                     ; store
    dec   heightd                          ; next row

; 4x4 block, H-only 6-tap filter
cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [sixtap_filter_hw_m]
    movq     mm4, [sixtap_filter_hw+mxq*8-48] ; set up 6tap filter in words
    movq     mm5, [sixtap_filter_hw+mxq*8-32]
    movq     mm6, [sixtap_filter_hw+mxq*8-16]
    movq      mm1, [srcq-2]                ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq      mm2, mm1                     ; byte ABCD..
    punpcklbw mm1, mm3                     ; byte->word ABCD
    pshufw    mm0, mm2, 0x9                ; byte CDEF..
    punpckhbw mm2, mm3                     ; byte->word EFGH
    punpcklbw mm0, mm3                     ; byte->word CDEF
    pshufw    mm1, mm1, 0x94               ; word ABBC
    pshufw    mm2, mm2, 0x94               ; word EFFG
    pmaddwd   mm1, mm4                     ; multiply 2px with F0/F1
    pshufw    mm3, mm0, 0x94               ; word CDDE
    movq      mm0, mm3                     ; backup for second set of pixels
    pmaddwd   mm3, mm5                     ; multiply 2px with F2/F3
    paddd     mm1, mm3                     ; add to 1st 2px cache
    movq      mm3, mm2                     ; backup for second set of pixels
    pmaddwd   mm2, mm6                     ; multiply 2px with F4/F5
    paddd     mm1, mm2                     ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    movd      mm2, [srcq+3]                ; byte FGHI (prevent overreads)
    pmaddwd   mm0, mm4                     ; multiply 1st backed up 2px with F0/F1
    pmaddwd   mm3, mm5                     ; multiply 2nd backed up 2px with F2/F3
    paddd     mm0, mm3                     ; add to 2nd 2px cache
    pxor      mm3, mm3                     ; zero mm3 again before unpacking with it
    punpcklbw mm2, mm3                     ; byte->word FGHI
    pshufw    mm2, mm2, 0xE9               ; word GHHI
    pmaddwd   mm2, mm6                     ; multiply 2px with F4/F5
    paddd     mm0, mm2                     ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw  mm1, mm0                     ; merge dword->word (4px)
    paddsw    mm1, mm7                     ; rounding
    packuswb  mm1, mm3                     ; clip and word->bytes
    movd   [dstq], mm1                     ; store
    dec   heightd                          ; next row
cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [fourtap_filter_v_m]
    lea      mxq, [fourtap_filter_v+mxq-32]
    movh   [dstq], m0        ; store
    dec  heightd            ; next row

cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [sixtap_filter_v_m]
    lea      mxq, [sixtap_filter_v+mxq-96]
    movh   [dstq], m0        ; store
    dec  heightd            ; next row
; 4x4 block, V-only 4-tap filter
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [fourtap_filter_v_m]
    lea      myq, [fourtap_filter_v+myq-32]
    movh      m1, [srcq+  srcstrideq]
    movh      m2, [srcq+2*srcstrideq]

    ; first calculate negative taps (to prevent losing positive overflows)
    movh      m4, [srcq+2*srcstrideq]      ; read new row

    ; then calculate positive taps
    dec  heightd            ; next row

; 4x4 block, V-only 6-tap filter
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [sixtap_filter_v_m]
    lea      myq, [sixtap_filter_v+myq-96]
    movh      m1, [srcq+srcstrideq]
    movh      m2, [srcq+srcstrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
    movh      m4, [srcq+srcstrideq]

    ; first calculate negative taps (to prevent losing positive overflows)
    ; then calculate positive taps
    movh      m5, [srcq+2*srcstrideq]      ; read new row
    dec  heightd            ; next row
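; VP8's bilinear MC blends two source pixels per output sample: with a = mx
; (or my) in 1..7, out = (pix0*(8-a) + pix1*a + 4) >> 3. The vb table above
; holds byte weight pairs (presumably consumed by pmaddubsw on the SSSE3
; path), the vw table word weights for the plain-multiply fallback below.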
%macro FILTER_BILINEAR 1
cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [bilinear_filter_vb_m]
    mova      m3, [bilinear_filter_vb+myq-16]
    movh      m0, [srcq+srcstrideq*0]
    movh      m1, [srcq+srcstrideq*1]
    movh      m2, [srcq+srcstrideq*2]
    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m1
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0
%else ; cpuflag(ssse3)
cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [bilinear_filter_vw_m]
    mova      m5, [bilinear_filter_vw+myq-1*16]
    mova      m4, [bilinear_filter_vw+myq+7*16]
    movh      m0, [srcq+srcstrideq*0]
    movh      m1, [srcq+srcstrideq*1]
    movh      m3, [srcq+srcstrideq*2]
    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m2
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0
%endif ; cpuflag(ssse3)
    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [bilinear_filter_vb_m]
    mova      m2, [filter_h2_shuf]
    mova      m3, [bilinear_filter_vb+mxq-16]
    movu      m0, [srcq+srcstrideq*0]
    movu      m1, [srcq+srcstrideq*1]
    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m1
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0
%else ; cpuflag(ssse3)
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [bilinear_filter_vw_m]
    mova      m5, [bilinear_filter_vw+mxq-1*16]
    mova      m4, [bilinear_filter_vw+mxq+7*16]
    movh      m0, [srcq+srcstrideq*0+0]
    movh      m1, [srcq+srcstrideq*0+1]
    movh      m2, [srcq+srcstrideq*1+0]
    movh      m3, [srcq+srcstrideq*1+1]
    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m2
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0
%endif ; cpuflag(ssse3)
    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
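; Full-pel cases (mx = my = 0): no filtering below, just straight copies, two
; rows per loop iteration.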
cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
    movq    mm0, [srcq+srcstrideq*0]
    movq    mm1, [srcq+srcstrideq*1]
    lea    srcq, [srcq+srcstrideq*2]
    movq [dstq+dststrideq*0], mm0
    movq [dstq+dststrideq*1], mm1
    lea    dstq, [dstq+dststrideq*2]

cglobal put_vp8_pixels16, 5, 5, 0, dst, dststride, src, srcstride, height
    movq    mm0, [srcq+srcstrideq*0+0]
    movq    mm1, [srcq+srcstrideq*0+8]
    movq    mm2, [srcq+srcstrideq*1+0]
    movq    mm3, [srcq+srcstrideq*1+8]
    lea    srcq, [srcq+srcstrideq*2]
    movq [dstq+dststrideq*0+0], mm0
    movq [dstq+dststrideq*0+8], mm1
    movq [dstq+dststrideq*1+0], mm2
    movq [dstq+dststrideq*1+8], mm3
    lea    dstq, [dstq+dststrideq*2]

cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
    movups xmm0, [srcq+srcstrideq*0]
    movups xmm1, [srcq+srcstrideq*1]
    lea    srcq, [srcq+srcstrideq*2]
    movaps [dstq+dststrideq*0], xmm0
    movaps [dstq+dststrideq*1], xmm1
    lea    dstq, [dstq+dststrideq*2]
;-----------------------------------------------------------------------------
; void ff_vp8_idct_dc_add_<opt>(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
;-----------------------------------------------------------------------------
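; Only the DC coefficient of the block is assumed nonzero here: dc = (block[0] + 4) >> 3
; is added to all 16 pixels of the 4x4 block (clamped to 0..255), and the stored
; coefficient is cleared for reuse.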
%macro ADD_DC 4
    %4        m3, [dst1q+strideq+%3]
    %4        m5, [dst2q+strideq+%3]
    %4 [dst1q+strideq+%3], m3
    %4 [dst2q+strideq+%3], m5
cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride
    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC    m0, m1, 0, movh

cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    movd      m3, [dst1q+strideq]
    movd      m5, [dst2q+strideq]
    pextrd [dst1q+strideq], m2, 1
    pextrd [dst2q], m2, 2
    pextrd [dst2q+strideq], m2, 3
;-----------------------------------------------------------------------------
; void ff_vp8_idct_dc_add4y_<opt>(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
;-----------------------------------------------------------------------------
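; Same DC-only add, but for four horizontally adjacent 4x4 luma blocks in one
; call: the four DCs sit 32 bytes (16 coefficients) apart in blockq, are
; gathered and splatted, then added across the full 16-pixel-wide row group.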
cglobal vp8_idct_dc_add4y, 3, 3, 0, dst, block, stride
    movd      m0, [blockq+32*0] ; A
    movd      m1, [blockq+32*2] ; C
    punpcklwd m0, [blockq+32*1] ; A B
    punpcklwd m1, [blockq+32*3] ; C D
    punpckldq m0, m1            ; A B C D
    movd [blockq+32*0], m6
    movd [blockq+32*1], m6
    movd [blockq+32*2], m6
    movd [blockq+32*3], m6
    punpcklbw m0, m0 ; AABBCCDD
    punpcklbw m6, m6 ; AABBCCDD
    punpcklbw m0, m0 ; AAAABBBB
    punpckhbw m1, m1 ; CCCCDDDD
    punpcklbw m6, m6 ; AAAABBBB
    punpckhbw m7, m7 ; CCCCDDDD
    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC    m0, m6, 0, mova
    ADD_DC    m1, m7, 8, mova

cglobal vp8_idct_dc_add4y, 3, 3, 6, dst, block, stride
    movd      m0, [blockq+32*0] ; A
    movd      m1, [blockq+32*2] ; C
    punpcklwd m0, [blockq+32*1] ; A B
    punpcklwd m1, [blockq+32*3] ; C D
    punpckldq m0, m1            ; A B C D
    movd [blockq+32*0], m1
    movd [blockq+32*1], m1
    movd [blockq+32*2], m1
    movd [blockq+32*3], m1
    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC    m0, m1, 0, mova
;-----------------------------------------------------------------------------
; void ff_vp8_idct_dc_add4uv_<opt>(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
;-----------------------------------------------------------------------------
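; Chroma variant: the four DC-only 4x4 blocks cover an 8x8 area, two blocks per
; row, so after the first ADD_DC the destination pointers advance by four lines
; (strideq*4) instead of moving 8 pixels to the right.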
cglobal vp8_idct_dc_add4uv, 3, 3, 0, dst, block, stride
    movd      m0, [blockq+32*0] ; A
    movd      m1, [blockq+32*2] ; C
    punpcklwd m0, [blockq+32*1] ; A B
    punpcklwd m1, [blockq+32*3] ; C D
    punpckldq m0, m1            ; A B C D
    movd [blockq+32*0], m6
    movd [blockq+32*1], m6
    movd [blockq+32*2], m6
    movd [blockq+32*3], m6
    punpcklbw m0, m0 ; AABBCCDD
    punpcklbw m6, m6 ; AABBCCDD
    punpcklbw m0, m0 ; AAAABBBB
    punpckhbw m1, m1 ; CCCCDDDD
    punpcklbw m6, m6 ; AAAABBBB
    punpckhbw m7, m7 ; CCCCDDDD
    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC    m0, m6, 0, mova
    lea    dst1q, [dst1q+strideq*4]
    lea    dst2q, [dst2q+strideq*4]
    ADD_DC    m1, m7, 0, mova
;-----------------------------------------------------------------------------
; void ff_vp8_idct_add_<opt>(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
;-----------------------------------------------------------------------------

; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
; this macro assumes that m6/m7 have words for 20091/17734 loaded
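; (20091/65536 = sqrt(2)*cos(pi/8) - 1 and 35468/65536 = sqrt(2)*sin(pi/8), the
; two multipliers of the VP8 inverse transform; pmulhw only handles signed
; words, so 35468 is stored halved as pw_17734 and the missing factor of two
; is presumably made up by a doubling elsewhere in the macro)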
%macro VP8_MULTIPLY_SUMSUB 4
    pmulhw             %3, m6 ;20091(1)
    pmulhw             %4, m6 ;20091(2)
    pmulhw             %1, m7 ;35468(1)
    pmulhw             %2, m7 ;35468(2)

; calculate x0=%1+%3; x1=%1-%3
;           x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
    SUMSUB_BA            w, %3, %1, %5     ;t0, t1
    VP8_MULTIPLY_SUMSUB m%2, m%4, m%5, m%6 ;t2, t3
    SUMSUB_BA            w, %4, %3, %5     ;tmp0, tmp3
    SUMSUB_BA            w, %2, %1, %5     ;tmp1, tmp2

%macro VP8_IDCT_ADD 0
cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
    movq         m0, [blockq+ 0]
    movq         m1, [blockq+ 8]
    movq         m2, [blockq+16]
    movq         m3, [blockq+24]
    movaps [blockq+ 0], xmm0
    movaps [blockq+16], xmm0
    movq  [blockq+ 0], m4
    movq  [blockq+ 8], m4
    movq  [blockq+16], m4
    movq  [blockq+24], m4
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4
    DEFINE_ARGS dst1, dst2, stride
    lea       dst2q, [dst1q+2*strideq]
    STORE_DIFFx2 m0, m1, m6, m7, m4, 3, dst1q, strideq
    STORE_DIFFx2 m2, m3, m6, m7, m4, 3, dst2q, strideq
;-----------------------------------------------------------------------------
; void ff_vp8_luma_dc_wht(int16_t block[4][4][16], int16_t dc[16])
;-----------------------------------------------------------------------------
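; The inverse WHT turns dc[] back into the 16 luma DC values; SCATTER_WHT then
; stores each result into coefficient 0 of its 4x4 block (the 2*16*n offsets
; step over whole 16-coefficient blocks).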
%macro SCATTER_WHT 3
    mov [blockq+2*16*(0+%3)], dc1w
    mov [blockq+2*16*(1+%3)], dc2w
    mov [blockq+2*16*(4+%3)], dc1w
    mov [blockq+2*16*(5+%3)], dc2w
    mov [blockq+2*16*(8+%3)], dc1w
    mov [blockq+2*16*(9+%3)], dc2w
    mov [blockq+2*16*(12+%3)], dc1w
    mov [blockq+2*16*(13+%3)], dc2w

%macro HADAMARD4_1D 4
    SUMSUB_BADC w, %2, %1, %4, %3
    SUMSUB_BADC w, %4, %2, %3, %1

cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2
    movaps [dc1q+ 0], xmm0
    movaps [dc1q+16], xmm0
    HADAMARD4_1D    0, 1, 2, 3
    TRANSPOSE4x4W   0, 1, 2, 3, 4
    HADAMARD4_1D    0, 1, 2, 3