;******************************************************************************
;* VP8 MMXEXT optimizations
;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA
fourtap_filter_hw_m:  times 4 dw  -6, 123
sixtap_filter_hw_m:   times 4 dw   2, -11
fourtap_filter_hb_m:  times 8 db  -6, 123
sixtap_filter_hb_m:   times 8 db   2,   1
fourtap_filter_v_m:   times 8 dw  -6
sixtap_filter_v_m:    times 8 dw   2
bilinear_filter_vw_m: times 8 dw   1
bilinear_filter_vb_m: times 8 db   7,   1
%ifdef PIC
%define fourtap_filter_hw  picregq
%define sixtap_filter_hw   picregq
%define fourtap_filter_hb  picregq
%define sixtap_filter_hb   picregq
%define fourtap_filter_v   picregq
%define sixtap_filter_v    picregq
%define bilinear_filter_vw picregq
%define bilinear_filter_vb picregq
%else
%define fourtap_filter_hw  fourtap_filter_hw_m
%define sixtap_filter_hw   sixtap_filter_hw_m
%define fourtap_filter_hb  fourtap_filter_hb_m
%define sixtap_filter_hb   sixtap_filter_hb_m
%define fourtap_filter_v   fourtap_filter_v_m
%define sixtap_filter_v    sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
%endif
filter_h2_shuf:  db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10

filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
pw_256:   times 8 dw 256
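
; pmulhrsw against pw_256 is used below as a rounding right-shift by 7, which
; normalizes VP8's 7-bit filter coefficients. A minimal C sketch of its
; per-word semantics (illustration only, not part of this file):
;
;   static inline int round_shift7(int x)  // x is a 16-bit filter sum
;   {
;       // pmulhrsw(x, 256) == (x*256 + (1 << 14)) >> 15 == (x + 64) >> 7
;       return (x * 256 + (1 << 14)) >> 15;
;   }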
pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734

pb_27_63: times 8 db 27, 63
pb_18_63: times 8 db 18, 63
pb_9_63:  times 8 db  9, 63

cextern pb_3
cextern pb_4
cextern pb_80

SECTION .text
;-----------------------------------------------------------------------------
; subpel MC functions:
;
; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
;                                              uint8_t *src, int srcstride,
;                                              int height, int mx, int my);
;-----------------------------------------------------------------------------
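
; For reference, a scalar C sketch of what the 6-tap variants compute; the
; function name and the f[] coefficient argument are illustrative only (the
; real coefficients are the sixtap/fourtap tables above):
;
;   static void epel_h6_c(uint8_t *dst, int dststride,
;                         const uint8_t *src, int srcstride,
;                         int height, int width, const int8_t f[6])
;   {
;       for (int y = 0; y < height; y++) {
;           for (int x = 0; x < width; x++) {
;               int sum = f[0] * src[x - 2] + f[1] * src[x - 1] +
;                         f[2] * src[x]     + f[3] * src[x + 1] +
;                         f[4] * src[x + 2] + f[5] * src[x + 3];
;               sum = (sum + 64) >> 7;                 // round (7-bit filter)
;               dst[x] = sum < 0 ? 0 : sum > 255 ? 255 : sum;
;           }
;           dst += dststride;
;           src += srcstride;
;       }
;   }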
%macro FILTER_SSSE3 1
cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, height, mx, picreg
    mova          m3, [filter_h6_shuf2]
    mova          m4, [filter_h6_shuf3]
    lea      picregq, [sixtap_filter_hb_m]

    mova          m5, [sixtap_filter_hb+mxq*8-48] ; set up 6tap filter in bytes
    mova          m6, [sixtap_filter_hb+mxq*8-32]
    mova          m7, [sixtap_filter_hb+mxq*8-16]

    ; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
    ; shuffle with a memory operand
    punpcklbw     m0, [srcq+3]
    pshufb        m0, [filter_h6_shuf1]
    pmulhrsw      m0, [pw_256]
    movh      [dstq], m0              ; store

    dec      heightd                  ; next row
cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
    mova          m3, [filter_h2_shuf]
    mova          m4, [filter_h4_shuf]
    lea      picregq, [fourtap_filter_hb_m]

    mova          m5, [fourtap_filter_hb+mxq-16] ; set up 4tap filter in bytes
    mova          m6, [fourtap_filter_hb+mxq]

    movh      [dstq], m0              ; store

    dec      heightd                  ; next row
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea      picregq, [fourtap_filter_hb_m]
    mova          m5, [fourtap_filter_hb+myq-16]
    mova          m6, [fourtap_filter_hb+myq]

    movh          m1, [srcq+ srcstrideq]
    movh          m2, [srcq+2*srcstrideq]

    movh          m3, [srcq+2*srcstrideq]       ; read new row

    dec      heightd                  ; next row
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea      picregq, [sixtap_filter_hb_m]
    lea          myq, [sixtap_filter_hb+myq*8]

    movh          m1, [srcq+srcstrideq]
    movh          m2, [srcq+srcstrideq*2]
    lea         srcq, [srcq+srcstrideq*2]

    movh          m4, [srcq+srcstrideq]

    movh          m5, [srcq+2*srcstrideq]       ; read new row

    pmaddubsw     m6, [myq-48]
    pmaddubsw     m1, [myq-32]
    pmaddubsw     m7, [myq-16]

    pmulhrsw      m6, [pw_256]

    dec      heightd                  ; next row
%endmacro

INIT_MMX ssse3
FILTER_SSSE3 4
INIT_XMM ssse3
FILTER_SSSE3 8
; 4x4 block, H-only 4-tap filter
INIT_MMX mmxext
cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
    lea      picregq, [fourtap_filter_hw_m]
    movq         mm4, [fourtap_filter_hw+mxq-16] ; set up 4tap filter in words
    movq         mm5, [fourtap_filter_hw+mxq]

    movq         mm1, [srcq-1]        ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq         mm2, mm1             ; byte ABCD..
    punpcklbw    mm1, mm6             ; byte->word ABCD
    pshufw       mm0, mm2, 9          ; byte CDEF..
    punpcklbw    mm0, mm6             ; byte->word CDEF
    pshufw       mm3, mm1, 0x94       ; word ABBC
    pshufw       mm1, mm0, 0x94       ; word CDDE
    pmaddwd      mm3, mm4             ; multiply 2px with F0/F1
    movq         mm0, mm1             ; backup for second set of pixels
    pmaddwd      mm1, mm5             ; multiply 2px with F2/F3
    paddd        mm3, mm1             ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    punpckhbw    mm2, mm6             ; byte->word EFGH
    pmaddwd      mm0, mm4             ; multiply backed up 2px with F0/F1
    pshufw       mm1, mm2, 0x94       ; word EFFG
    pmaddwd      mm1, mm5             ; multiply 2px with F2/F3
    paddd        mm0, mm1             ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw     mm3, mm0             ; merge dword->word (4px)
    paddsw       mm3, mm7             ; rounding
    packuswb     mm3, mm6             ; clip and word->bytes
    movd      [dstq], mm3             ; store

    dec      heightd                  ; next row
; 4x4 block, H-only 6-tap filter
cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
    lea      picregq, [sixtap_filter_hw_m]
    movq         mm4, [sixtap_filter_hw+mxq*8-48] ; set up 6tap filter in words
    movq         mm5, [sixtap_filter_hw+mxq*8-32]
    movq         mm6, [sixtap_filter_hw+mxq*8-16]

    movq         mm1, [srcq-2]        ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq         mm2, mm1             ; byte ABCD..
    punpcklbw    mm1, mm3             ; byte->word ABCD
    pshufw       mm0, mm2, 0x9        ; byte CDEF..
    punpckhbw    mm2, mm3             ; byte->word EFGH
    punpcklbw    mm0, mm3             ; byte->word CDEF
    pshufw       mm1, mm1, 0x94       ; word ABBC
    pshufw       mm2, mm2, 0x94       ; word EFFG
    pmaddwd      mm1, mm4             ; multiply 2px with F0/F1
    pshufw       mm3, mm0, 0x94       ; word CDDE
    movq         mm0, mm3             ; backup for second set of pixels
    pmaddwd      mm3, mm5             ; multiply 2px with F2/F3
    paddd        mm1, mm3             ; add to 1st 2px cache
    movq         mm3, mm2             ; backup for second set of pixels
    pmaddwd      mm2, mm6             ; multiply 2px with F4/F5
    paddd        mm1, mm2             ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    movd         mm2, [srcq+3]        ; byte FGHI (prevent overreads)
    pmaddwd      mm0, mm4             ; multiply 1st backed up 2px with F0/F1
    pmaddwd      mm3, mm5             ; multiply 2nd backed up 2px with F2/F3
    paddd        mm0, mm3             ; add to 2nd 2px cache
    punpcklbw    mm2, mm3             ; byte->word FGHI
    pshufw       mm2, mm2, 0xE9       ; word GHHI
    pmaddwd      mm2, mm6             ; multiply 2px with F4/F5
    paddd        mm0, mm2             ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw     mm1, mm0             ; merge dword->word (4px)
    paddsw       mm1, mm7             ; rounding
    packuswb     mm1, mm3             ; clip and word->bytes
    movd      [dstq], mm1             ; store

    dec      heightd                  ; next row
INIT_XMM sse2
cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
    lea      picregq, [fourtap_filter_v_m]
    lea          mxq, [fourtap_filter_v+mxq-32]

    movh      [dstq], m0              ; store

    dec      heightd                  ; next row
cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
    lea      picregq, [sixtap_filter_v_m]
    lea          mxq, [sixtap_filter_v+mxq-96]

    movh      [dstq], m0              ; store

    dec      heightd                  ; next row
%macro FILTER_V 1
; 4x4 block, V-only 4-tap filter
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea      picregq, [fourtap_filter_v_m]
    lea          myq, [fourtap_filter_v+myq-32]

    movh          m1, [srcq+ srcstrideq]
    movh          m2, [srcq+2*srcstrideq]

    ; first calculate negative taps (to prevent losing positive overflows)
    movh          m4, [srcq+2*srcstrideq]       ; read new row

    ; then calculate positive taps

    dec      heightd                  ; next row
; 4x4 block, V-only 6-tap filter
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea      picregq, [sixtap_filter_v_m]
    lea          myq, [sixtap_filter_v+myq-96]

    movh          m1, [srcq+srcstrideq]
    movh          m2, [srcq+srcstrideq*2]
    lea         srcq, [srcq+srcstrideq*2]

    movh          m4, [srcq+srcstrideq]

    ; first calculate negative taps (to prevent losing positive overflows)

    ; then calculate positive taps
    movh          m5, [srcq+2*srcstrideq]       ; read new row

    dec      heightd                  ; next row
%endmacro

INIT_MMX mmxext
FILTER_V 4
INIT_XMM sse2
FILTER_V 8
%macro FILTER_BILINEAR 1
cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, picreg, my
    lea      picregq, [bilinear_filter_vw_m]
    mova          m5, [bilinear_filter_vw+myq-1*16]
    mova          m4, [bilinear_filter_vw+myq+7*16]

    movh          m0, [srcq+srcstrideq*0]
    movh          m1, [srcq+srcstrideq*1]
    movh          m3, [srcq+srcstrideq*2]

    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m2
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0

    lea         dstq, [dstq+dststrideq*2]
    lea         srcq, [srcq+srcstrideq*2]
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
    lea      picregq, [bilinear_filter_vw_m]
    mova          m5, [bilinear_filter_vw+mxq-1*16]
    mova          m4, [bilinear_filter_vw+mxq+7*16]

    movh          m0, [srcq+srcstrideq*0+0]
    movh          m1, [srcq+srcstrideq*0+1]
    movh          m2, [srcq+srcstrideq*1+0]
    movh          m3, [srcq+srcstrideq*1+1]

    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m2
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0

    lea         dstq, [dstq+dststrideq*2]
    lea         srcq, [srcq+srcstrideq*2]
%endmacro

INIT_MMX mmxext
FILTER_BILINEAR 4
INIT_XMM sse2
FILTER_BILINEAR 8
%macro FILTER_BILINEAR_SSSE3 1
cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, picreg, my
    lea      picregq, [bilinear_filter_vb_m]
    mova          m3, [bilinear_filter_vb+myq-16]

    movh          m0, [srcq+srcstrideq*0]
    movh          m1, [srcq+srcstrideq*1]
    movh          m2, [srcq+srcstrideq*2]

    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m1
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0

    lea         dstq, [dstq+dststrideq*2]
    lea         srcq, [srcq+srcstrideq*2]
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
    lea      picregq, [bilinear_filter_vb_m]
    mova          m2, [filter_h2_shuf]
    mova          m3, [bilinear_filter_vb+mxq-16]

    movu          m0, [srcq+srcstrideq*0]
    movu          m1, [srcq+srcstrideq*1]

    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m1
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0

    lea         dstq, [dstq+dststrideq*2]
    lea         srcq, [srcq+srcstrideq*2]
%endmacro

INIT_MMX ssse3
FILTER_BILINEAR_SSSE3 4
INIT_XMM ssse3
FILTER_BILINEAR_SSSE3 8
INIT_MMX mmx
cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
    movq         mm0, [srcq+srcstrideq*0]
    movq         mm1, [srcq+srcstrideq*1]
    lea         srcq, [srcq+srcstrideq*2]
    movq [dstq+dststrideq*0], mm0
    movq [dstq+dststrideq*1], mm1
    lea         dstq, [dstq+dststrideq*2]
cglobal put_vp8_pixels16, 5, 5, 0, dst, dststride, src, srcstride, height
    movq         mm0, [srcq+srcstrideq*0+0]
    movq         mm1, [srcq+srcstrideq*0+8]
    movq         mm2, [srcq+srcstrideq*1+0]
    movq         mm3, [srcq+srcstrideq*1+8]
    lea         srcq, [srcq+srcstrideq*2]
    movq [dstq+dststrideq*0+0], mm0
    movq [dstq+dststrideq*0+8], mm1
    movq [dstq+dststrideq*1+0], mm2
    movq [dstq+dststrideq*1+8], mm3
    lea         dstq, [dstq+dststrideq*2]
INIT_XMM sse
cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
    movups      xmm0, [srcq+srcstrideq*0]
    movups      xmm1, [srcq+srcstrideq*1]
    lea         srcq, [srcq+srcstrideq*2]
    movaps [dstq+dststrideq*0], xmm0
    movaps [dstq+dststrideq*1], xmm1
    lea         dstq, [dstq+dststrideq*2]
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add_<opt>(uint8_t *dst, int16_t block[16], int stride);
;-----------------------------------------------------------------------------
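
; A scalar sketch of the operation, assuming it mirrors the C fallback
; (illustrative only; the asm below also clears block[0] after reading it):
;
;   static void idct_dc_add_c(uint8_t *dst, int16_t block[16], int stride)
;   {
;       int dc = (block[0] + 4) >> 3;    // rounded DC term
;       block[0] = 0;
;       for (int y = 0; y < 4; y++) {
;           for (int x = 0; x < 4; x++) {
;               int v = dst[x] + dc;
;               dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
;           }
;           dst += stride;
;       }
;   }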
%macro ADD_DC 4
    %4            m3, [dst1q+strideq+%3]
    %4            m5, [dst2q+strideq+%3]
    %4 [dst1q+strideq+%3], m3
    %4 [dst2q+strideq+%3], m5
%endmacro
INIT_MMX mmx
cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride
    DEFINE_ARGS dst1, dst2, stride
    lea        dst2q, [dst1q+strideq*2]
    ADD_DC        m0, m1, 0, movh
INIT_XMM sse4
cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
    DEFINE_ARGS dst1, dst2, stride
    lea        dst2q, [dst1q+strideq*2]
    movd          m3, [dst1q+strideq]
    movd          m5, [dst2q+strideq]
    pextrd [dst1q+strideq], m2, 1
    pextrd       [dst2q], m2, 2
    pextrd [dst2q+strideq], m2, 3
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, int16_t block[4][16], int stride);
;-----------------------------------------------------------------------------
INIT_MMX mmx
cglobal vp8_idct_dc_add4y, 3, 3, 0, dst, block, stride
    movd          m0, [blockq+32*0] ; A
    movd          m1, [blockq+32*2] ; C
    punpcklwd     m0, [blockq+32*1] ; A B
    punpcklwd     m1, [blockq+32*3] ; C D
    punpckldq     m0, m1            ; A B C D

    movd [blockq+32*0], m6
    movd [blockq+32*1], m6
    movd [blockq+32*2], m6
    movd [blockq+32*3], m6

    punpcklbw     m0, m0 ; AABBCCDD
    punpcklbw     m6, m6 ; AABBCCDD
    punpcklbw     m0, m0 ; AAAABBBB
    punpckhbw     m1, m1 ; CCCCDDDD
    punpcklbw     m6, m6 ; AAAABBBB
    punpckhbw     m7, m7 ; CCCCDDDD

    DEFINE_ARGS dst1, dst2, stride
    lea        dst2q, [dst1q+strideq*2]
    ADD_DC        m0, m6, 0, mova
    ADD_DC        m1, m7, 8, mova
INIT_XMM sse2
cglobal vp8_idct_dc_add4y, 3, 3, 6, dst, block, stride
    movd          m0, [blockq+32*0] ; A
    movd          m1, [blockq+32*2] ; C
    punpcklwd     m0, [blockq+32*1] ; A B
    punpcklwd     m1, [blockq+32*3] ; C D
    punpckldq     m0, m1            ; A B C D

    movd [blockq+32*0], m1
    movd [blockq+32*1], m1
    movd [blockq+32*2], m1
    movd [blockq+32*3], m1

    DEFINE_ARGS dst1, dst2, stride
    lea        dst2q, [dst1q+strideq*2]
    ADD_DC        m0, m1, 0, mova
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, int16_t block[4][16], int stride);
;-----------------------------------------------------------------------------
INIT_MMX mmx
cglobal vp8_idct_dc_add4uv, 3, 3, 0, dst, block, stride
    movd          m0, [blockq+32*0] ; A
    movd          m1, [blockq+32*2] ; C
    punpcklwd     m0, [blockq+32*1] ; A B
    punpcklwd     m1, [blockq+32*3] ; C D
    punpckldq     m0, m1            ; A B C D

    movd [blockq+32*0], m6
    movd [blockq+32*1], m6
    movd [blockq+32*2], m6
    movd [blockq+32*3], m6

    punpcklbw     m0, m0 ; AABBCCDD
    punpcklbw     m6, m6 ; AABBCCDD
    punpcklbw     m0, m0 ; AAAABBBB
    punpckhbw     m1, m1 ; CCCCDDDD
    punpcklbw     m6, m6 ; AAAABBBB
    punpckhbw     m7, m7 ; CCCCDDDD

    DEFINE_ARGS dst1, dst2, stride
    lea        dst2q, [dst1q+strideq*2]
    ADD_DC        m0, m6, 0, mova
    lea        dst1q, [dst1q+strideq*4]
    lea        dst2q, [dst2q+strideq*4]
    ADD_DC        m1, m7, 0, mova
;-----------------------------------------------------------------------------
; void vp8_idct_add_<opt>(uint8_t *dst, int16_t block[16], int stride);
;-----------------------------------------------------------------------------

; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
; this macro assumes that m6/m7 have words for 20091/17734 loaded
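
; In C terms the two fixed-point multiplies are (assuming the standard VP8
; IDCT constants; 17734 is 35468>>1 so that the factor fits a signed word):
;
;   #define MUL_20091(a) ((((a) * 20091) >> 16) + (a)) // ~ sqrt(2)*cos(pi/8)
;   #define MUL_35468(a) (((a) * 35468) >> 16)         // ~ sqrt(2)*sin(pi/8)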
%macro VP8_MULTIPLY_SUMSUB 4
    pmulhw        %3, m6 ; 20091(1)
    pmulhw        %4, m6 ; 20091(2)
    pmulhw        %1, m7 ; 35468(1)
    pmulhw        %2, m7 ; 35468(2)
%endmacro

; calculate x0=%1+%3; x1=%1-%3
;           x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
    SUMSUB_BA              w, %3, %1, %5     ; t0, t1
    VP8_MULTIPLY_SUMSUB  m%2, m%4, m%5, m%6  ; t2, t3
    SUMSUB_BA              w, %4, %3, %5     ; tmp0, tmp3
    SUMSUB_BA              w, %2, %1, %5     ; tmp1, tmp2
%endmacro
%macro VP8_IDCT_ADD 0
cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
    movq          m0, [blockq+ 0]
    movq          m1, [blockq+ 8]
    movq          m2, [blockq+16]
    movq          m3, [blockq+24]

    movaps [blockq+ 0], xmm0
    movaps [blockq+16], xmm0
    movq [blockq+ 0], m4
    movq [blockq+ 8], m4
    movq [blockq+16], m4
    movq [blockq+24], m4

    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4

    DEFINE_ARGS dst1, dst2, stride
    lea        dst2q, [dst1q+2*strideq]
    STORE_DIFFx2  m0, m1, m6, m7, m4, 3, dst1q, strideq
    STORE_DIFFx2  m2, m3, m6, m7, m4, 3, dst2q, strideq
%endmacro

INIT_MMX mmx
VP8_IDCT_ADD
INIT_MMX sse
VP8_IDCT_ADD
;-----------------------------------------------------------------------------
; void vp8_luma_dc_wht_mmxext(int16_t block[4][4][16], int16_t dc[16])
;-----------------------------------------------------------------------------
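
; For orientation, a scalar sketch of the inverse WHT being vectorized here,
; assuming it matches the C fallback (each result seeds one 4x4 block's DC):
;
;   static void luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
;   {
;       int t0, t1, t2, t3;
;       for (int i = 0; i < 4; i++) {            // vertical pass
;           t0 = dc[0*4 + i] + dc[3*4 + i];
;           t1 = dc[1*4 + i] + dc[2*4 + i];
;           t2 = dc[1*4 + i] - dc[2*4 + i];
;           t3 = dc[0*4 + i] - dc[3*4 + i];
;           dc[0*4 + i] = t0 + t1;
;           dc[1*4 + i] = t3 + t2;
;           dc[2*4 + i] = t0 - t1;
;           dc[3*4 + i] = t3 - t2;
;       }
;       for (int i = 0; i < 4; i++) {            // horizontal pass + scatter
;           t0 = dc[i*4 + 0] + dc[i*4 + 3] + 3;  // +3 rounds the final >>3
;           t1 = dc[i*4 + 1] + dc[i*4 + 2];
;           t2 = dc[i*4 + 1] - dc[i*4 + 2];
;           t3 = dc[i*4 + 0] - dc[i*4 + 3] + 3;
;           dc[i*4 + 0] = dc[i*4 + 1] = dc[i*4 + 2] = dc[i*4 + 3] = 0;
;           block[i][0][0] = (t0 + t1) >> 3;
;           block[i][1][0] = (t3 + t2) >> 3;
;           block[i][2][0] = (t0 - t1) >> 3;
;           block[i][3][0] = (t3 - t2) >> 3;
;       }
;   }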
%macro SCATTER_WHT 3
    mov [blockq+2*16*(0+%3)], dc1w
    mov [blockq+2*16*(1+%3)], dc2w
    mov [blockq+2*16*(4+%3)], dc1w
    mov [blockq+2*16*(5+%3)], dc2w
    mov [blockq+2*16*(8+%3)], dc1w
    mov [blockq+2*16*(9+%3)], dc2w
    mov [blockq+2*16*(12+%3)], dc1w
    mov [blockq+2*16*(13+%3)], dc2w
%endmacro

%macro HADAMARD4_1D 4
    SUMSUB_BADC w, %2, %1, %4, %3
    SUMSUB_BADC w, %4, %2, %3, %1
%endmacro

INIT_MMX sse
cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2
    movaps [dc1q+ 0], xmm0
    movaps [dc1q+16], xmm0

    HADAMARD4_1D   0, 1, 2, 3
    TRANSPOSE4x4W  0, 1, 2, 3, 4
    HADAMARD4_1D   0, 1, 2, 3
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
;-----------------------------------------------------------------------------
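
; A scalar sketch of the simple loop filter, grounded in the comments below
; (mask: 2*|p0-q0| + |p1-q1|/2 <= flim; then a = 3*(q0-p0) + (p1-q1),
; f1 = (a+4)>>3 is subtracted from q0 and f2 = (a+3)>>3 added to p0):
;
;   static void simple_filter_c(uint8_t *p1, uint8_t *p0,
;                               uint8_t *q0, uint8_t *q1, int flim)
;   {
;       if (2 * abs(*p0 - *q0) + abs(*p1 - *q1) / 2 <= flim) {
;           int a  = av_clip_int8(3 * (*q0 - *p0) + av_clip_int8(*p1 - *q1));
;           int f1 = av_clip_int8(a + 4) >> 3;
;           int f2 = av_clip_int8(a + 3) >> 3;
;           *q0 = av_clip_uint8(*q0 - f1);
;           *p0 = av_clip_uint8(*p0 + f2);
;       }
;   }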
; macro called with 7 mm register indexes as argument, and 4 regular registers
;
; first 4 mm registers will carry the transposed pixel data
; the other three are scratchspace (one would be sufficient, but this allows
; for more spreading/pipelining and thus faster execution on OOE CPUs)
;
; first two regular registers are buf+4*stride and buf+5*stride
; third is -stride, fourth is +stride
11
1255 ; interleave 8 (A-H) rows of 4 pixels each
1256 movd m
%1, [%8+%10*4] ; A0-3
1257 movd m
%5, [%9+%10*4] ; B0-3
1258 movd m
%2, [%8+%10*2] ; C0-3
1259 movd m
%6, [%8+%10] ; D0-3
1260 movd m
%3, [%8] ; E0-3
1261 movd m
%7, [%9] ; F0-3
1262 movd m
%4, [%9+%11] ; G0-3
1263 punpcklbw m
%1, m
%5 ; A/B interleaved
1264 movd m
%5, [%9+%11*2] ; H0-3
1265 punpcklbw m
%2, m
%6 ; C/D interleaved
1266 punpcklbw m
%3, m
%7 ; E/F interleaved
1267 punpcklbw m
%4, m
%5 ; G/H interleaved
; macro called with 7 mm register indexes as argument, and 5 regular registers
; first 11 mean the same as READ_8x4_INTERLEAVED above
; fifth regular register is scratchspace to reach the bottom 8 rows, it
; will be set to second regular register + 8*stride at the end
%macro READ_16x4_INTERLEAVED 12
    ; transpose 16 (A-P) rows of 4 pixels each

    ; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
    movd         m%1, [%8+%10*4]   ; A0-3
    movd         m%3, [%12+%10*4]  ; I0-3
    movd         m%2, [%8+%10*2]   ; C0-3
    movd         m%4, [%12+%10*2]  ; K0-3
    movd         m%6, [%8+%10]     ; D0-3
    movd         m%5, [%12+%10]    ; L0-3
    movd         m%7, [%12]        ; M0-3
    punpcklbw    m%1, m%3          ; A/I
    movd         m%3, [%8]         ; E0-3
    punpcklbw    m%2, m%4          ; C/K
    punpcklbw    m%6, m%5          ; D/L
    punpcklbw    m%3, m%7          ; E/M
    punpcklbw    m%2, m%6          ; C/D/K/L interleaved

    ; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
    movd         m%5, [%9+%10*4]   ; B0-3
    movd         m%4, [%12+%10*4]  ; J0-3
    movd         m%7, [%9]         ; F0-3
    movd         m%6, [%12]        ; N0-3
    punpcklbw    m%5, m%4          ; B/J
    punpcklbw    m%7, m%6          ; F/N
    punpcklbw    m%1, m%5          ; A/B/I/J interleaved
    punpcklbw    m%3, m%7          ; E/F/M/N interleaved
    movd         m%4, [%9+%11]     ; G0-3
    movd         m%6, [%12+%11]    ; O0-3
    movd         m%5, [%9+%11*2]   ; H0-3
    movd         m%7, [%12+%11*2]  ; P0-3
    punpcklbw    m%4, m%6          ; G/O
    punpcklbw    m%5, m%7          ; H/P
    punpcklbw    m%4, m%5          ; G/H/O/P interleaved
%endmacro
; write 4 mm registers of 2 dwords each
; first four arguments are mm register indexes containing source data
; last four are registers containing buf+4*stride, buf+5*stride,
; -stride and +stride
    ; write out (2 dwords per register)
; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_4x2D, but with an extra register, so that the 5 regular
; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
; we add 1*stride to the third regular register in the process
; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
; same memory region), or 8 if they cover two separate buffers (third one points to
; a different memory region than the first two), allowing for more optimal code for
; the 16-width case
%macro WRITE_4x4D 10
    ; write out (4 dwords per register), start with dwords zero
; write 4 or 8 words in the mmx/xmm registers as 8 lines
; 1 and 2 are the registers to write, this can be the same (for SSE2)
; for pre-SSE4:
; 3 is a general-purpose register that we will clobber
; for SSE4:
; 3 is a pointer to the destination's 5th line
; 4 is a pointer to the destination's 4th line
; 5/6 is -stride and +stride
%macro WRITE_8W 6
    pextrw [%3+%4*4], %1, 0
    pextrw [%2+%4*4], %1, 1
    pextrw [%3+%4*2], %1, 2
    pextrw [%3+%4  ], %1, 3
    pextrw [%2+%5  ], %1, 6
    pextrw [%2+%5*2], %1, 7
%endmacro
%macro SIMPLE_LOOPFILTER 2
cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
%if mmsize == 8 ; mmx/mmxext
    SPLATB_REG    m7, flim, m0      ; splat "flim" into register

    ; set up indexes to address 4 rows
    DEFINE_ARGS dst1, mstride, stride, cntr, dst2
    DEFINE_ARGS dst1, mstride, stride, dst3, dst2
    mov      strideq, mstrideq
    lea        dst1q, [dst1q+4*strideq-2]

%if mmsize == 8 ; mmx / mmxext
    ; read 4 half/full rows of pixels
    mova          m0, [dst1q+mstrideq*2]    ; p1
    mova          m1, [dst1q+mstrideq]      ; p0
    mova          m2, [dst1q]               ; q0
    mova          m3, [dst1q+ strideq]      ; q1
    lea        dst2q, [dst1q+ strideq]
%if mmsize == 8 ; mmx/mmxext
    READ_8x4_INTERLEAVED  0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq
    READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq, dst3q
    TRANSPOSE4x4W         0, 1, 2, 3, 4

    mova          m5, m2            ; m5=backup of q0
    mova          m6, m1            ; m6=backup of p0
    psubusb       m1, m2            ; p0-q0
    psubusb       m2, m6            ; q0-p0
    por           m1, m2            ; FFABS(p0-q0)
    paddusb       m1, m1            ; m1=FFABS(p0-q0)*2

    psubusb       m3, m0            ; q1-p1
    psubusb       m0, m4            ; p1-q1
    por           m3, m0            ; FFABS(p1-q1)
    psubsb        m2, m4            ; m2=p1-q1 (signed) backup for below
    psrlq         m3, 1             ; m3=FFABS(p1-q1)/2, this can be used signed
    pcmpeqb       m3, m1            ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)

    ; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
    psubsb        m5, m0            ; q0-p0 (signed)
    paddsb        m2, m5            ; a=(p1-q1) + 3*(q0-p0)
    pand          m2, m3            ; apply filter mask (m3)
    paddsb        m2, [pb_4]        ; f1<<3=a+4
    paddsb        m1, [pb_3]        ; f2<<3=a+3
    pand          m1, m3            ; cache f2<<3
    pcmpgtb       m0, m2            ; which values are <0?
    psubb         m3, m2            ; -f1<<3
    paddusb       m4, m3            ; q0-f1
    pcmpgtb       m0, m1            ; which values are <0?
    psubb         m3, m1            ; -f2<<3
    psubusb       m6, m3            ; p0+f2
    mova [dst1q+mstrideq], m6
    SBUTTERFLY    bw, 6, 4, 0

%if mmsize == 16 ; sse2
    WRITE_8W      m6, dst2q, dst1q, mstrideq, strideq
    lea        dst2q, [dst3q+mstrideq+1]
    WRITE_8W      m4, dst3q, dst2q, mstrideq, strideq
    WRITE_2x4W    m6, m4, dst2q, dst1q, mstrideq, strideq

%if mmsize == 8 ; mmx/mmxext
    add        dst1q, 8             ; advance 8 cols = pixels
    lea        dst1q, [dst1q+strideq*8-1]   ; advance 8 rows = lines
%endmacro

INIT_MMX mmx
SIMPLE_LOOPFILTER v, 4
SIMPLE_LOOPFILTER h, 5
INIT_MMX mmxext
SIMPLE_LOOPFILTER v, 4
SIMPLE_LOOPFILTER h, 5
INIT_XMM sse2
SIMPLE_LOOPFILTER v, 3
SIMPLE_LOOPFILTER h, 5
INIT_XMM ssse3
SIMPLE_LOOPFILTER v, 3
SIMPLE_LOOPFILTER h, 5
INIT_XMM sse4
SIMPLE_LOOPFILTER h, 5
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                            int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
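
; The mask computation below follows VP8's normal_limit()/hev() conditions;
; roughly, in C (a sketch; E/I/thresh are the flimE/flimI/hev_thr arguments):
;
;   static int normal_limit(int p3, int p2, int p1, int p0,
;                           int q0, int q1, int q2, int q3, int E, int I)
;   {
;       return 2 * abs(p0 - q0) + abs(p1 - q1) / 2 <= E &&
;              abs(p3 - p2) <= I && abs(p2 - p1) <= I && abs(p1 - p0) <= I &&
;              abs(q3 - q2) <= I && abs(q2 - q1) <= I && abs(q1 - q0) <= I;
;   }
;
;   static int hev(int p1, int p0, int q0, int q1, int thresh)
;   {
;       return abs(p1 - p0) > thresh || abs(q1 - q0) > thresh;
;   }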
%macro INNER_LOOPFILTER 2
%define stack_size 0
%ifndef m8   ; stack layout: [0]=E, [1]=I, [2]=hev_thr
%ifidn %1, v ;               [3]=hev() result
%define stack_size mmsize * -4
%else ; h    ; extra storage space for transposes
%define stack_size mmsize * -5
%endif
%endif

%if %2 == 8 ; chroma
cglobal vp8_%1_loop_filter8uv_inner, 6, 6, 13, stack_size, dst, dst8, stride, flimE, flimI, hevthr
%else ; luma
cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, flimI, hevthr
%endif

    ; splat function arguments
    SPLATB_REG    m0, flimEq, m7     ; E
    SPLATB_REG    m1, flimIq, m7     ; I
    SPLATB_REG    m2, hevthrq, m7    ; hev_thresh

%define m_flimE    [rsp]
%define m_flimI    [rsp+mmsize]
%define m_hevthr   [rsp+mmsize*2]
%define m_maskres  [rsp+mmsize*3]
%define m_p0backup [rsp+mmsize*3]
%define m_q0backup [rsp+mmsize*4]

%define m_hevthr   m11
%define m_maskres  m12
%define m_p0backup m12
%define m_q0backup m8

    ; splat function arguments
    SPLATB_REG m_flimE, flimEq, m7   ; E
    SPLATB_REG m_flimI, flimIq, m7   ; I
    SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh

%if %2 == 8 ; chroma
    DEFINE_ARGS dst1, dst8, mstride, stride, dst2
    DEFINE_ARGS dst1, mstride, stride, dst2, cntr
    DEFINE_ARGS dst1, mstride, stride, dst2, dst8
    mov      strideq, mstrideq
    lea        dst1q, [dst1q+strideq*4-4]
%if %2 == 8 ; chroma
    lea        dst8q, [dst8q+strideq*4-4]

    lea        dst2q, [dst1q+strideq]
%if %2 == 8 && mmsize == 16
    movrow        m0, [dst1q+mstrideq*4] ; p3
    movrow        m1, [dst2q+mstrideq*4] ; p2
    movrow        m2, [dst1q+mstrideq*2] ; p1
    movrow        m5, [dst2q]            ; q1
    movrow        m6, [dst2q+ strideq*1] ; q2
    movrow        m7, [dst2q+ strideq*2] ; q3
%if mmsize == 16 && %2 == 8
    movhps        m0, [dst8q+mstrideq*4]
    movhps        m2, [dst8q+mstrideq*2]
    movhps        m1, [dst8q+mstrideq*4]
    movhps        m6, [dst8q+ strideq]
    movhps        m7, [dst8q+ strideq*2]
%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu          m0, [dst1q+mstrideq*4]
    movu          m1, [dst2q+mstrideq*4]
    movu          m2, [dst1q+mstrideq*2]
    movu          m3, [dst1q+mstrideq]
    movu          m6, [dst2q+ strideq]
    TRANSPOSE4x4B  0, 1, 2, 3, 7
    movu          m7, [dst2q+ strideq*2]
    TRANSPOSE4x4B  4, 5, 6, 7, 1
    SBUTTERFLY    dq, 0, 4, 1   ; p3/p2
    SBUTTERFLY    dq, 2, 6, 1   ; q0/q1
    SBUTTERFLY    dq, 3, 7, 1   ; q2/q3
    mova m_q0backup, m2         ; store q0
    SBUTTERFLY    dq, 1, 5, 2   ; p1/p0
    mova m_p0backup, m5         ; store p0

    lea        dst8q, [dst1q+ strideq*8]

    ; read 16 rows of 8px each, interleave
    movh          m0, [dst1q+mstrideq*4]
    movh          m1, [dst8q+mstrideq*4]
    movh          m2, [dst1q+mstrideq*2]
    movh          m5, [dst8q+mstrideq*2]
    movh          m3, [dst1q+mstrideq]
    movh          m6, [dst8q+mstrideq]
    punpcklbw     m0, m1        ; A/I
    punpcklbw     m2, m5        ; C/K
    punpcklbw     m3, m6        ; D/L
    punpcklbw     m4, m7        ; E/M
    movh          m1, [dst2q+mstrideq*4]
    movh          m6, [dst8q+mstrideq*4]
    punpcklbw     m1, m6        ; B/J
    punpcklbw     m5, m7        ; F/N
    movh          m6, [dst2q+ strideq]
    movh          m7, [dst8q+ strideq]
    punpcklbw     m6, m7        ; G/O
    TRANSPOSE4x4B  0, 1, 2, 3, 7
    movh          m7, [dst2q+ strideq*2]
    movh          m1, [dst8q+ strideq*2]
    punpcklbw     m7, m1        ; H/P
    TRANSPOSE4x4B  4, 5, 6, 7, 1
    SBUTTERFLY    dq, 0, 4, 1   ; p3/p2
    SBUTTERFLY    dq, 2, 6, 1   ; q0/q1
    SBUTTERFLY    dq, 3, 7, 1   ; q2/q3
    mova m_q0backup, m2         ; store q0
    SBUTTERFLY    dq, 1, 5, 2   ; p1/p0
    mova m_p0backup, m5         ; store p0

    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
    psubusb       m4, m0        ; p2-p3
    psubusb       m0, m1        ; p3-p2
    por           m0, m4        ; abs(p3-p2)

    psubusb       m4, m1        ; p1-p2
    psubusb       m1, m2        ; p2-p1
    por           m1, m4        ; abs(p2-p1)

    psubusb       m4, m7        ; q2-q3
    psubusb       m7, m6        ; q3-q2
    por           m7, m4        ; abs(q3-q2)

    psubusb       m4, m6        ; q1-q2
    psubusb       m6, m5        ; q2-q1
    por           m6, m4        ; abs(q2-q1)

%if notcpuflag(mmxext)
    pcmpeqb       m0, m3        ; abs(p3-p2) <= I
    pcmpeqb       m1, m3        ; abs(p2-p1) <= I
    pcmpeqb       m7, m3        ; abs(q3-q2) <= I
    pcmpeqb       m6, m3        ; abs(q2-q1) <= I

    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP           7, 3         ; now m7 is zero
    movrow        m3, [dst1q+mstrideq] ; p0
%if mmsize == 16 && %2 == 8
    movhps        m3, [dst8q+mstrideq]

    psubusb       m1, m3        ; p1-p0
    psubusb       m6, m2        ; p0-p1
    por           m1, m6        ; abs(p1-p0)
%if notcpuflag(mmxext)
    psubusb       m6, m_hevthr
    pcmpeqb       m1, m7        ; abs(p1-p0) <= I
    pcmpeqb       m6, m7        ; abs(p1-p0) <= hev_thresh
    pmaxub        m0, m1        ; max_I
    SWAP           1, 4         ; max_hev_thresh
    SWAP           6, 4         ; now m6 is I

    movrow        m4, [dst1q]   ; q0
%if mmsize == 16 && %2 == 8
    psubusb       m1, m5        ; q0-q1
    psubusb       m7, m4        ; q1-q0
    por           m1, m7        ; abs(q1-q0)
%if notcpuflag(mmxext)
    psubusb       m7, m_hevthr
    pcmpeqb       m1, m6        ; abs(q1-q0) <= I
    pcmpeqb       m7, m6        ; abs(q1-q0) <= hev_thresh
    pand          m0, m1        ; abs([pq][321]-[pq][210]) <= I
    psubusb       m6, m_hevthr
    pcmpeqb       m0, m7        ; max(abs(..)) <= I
    pcmpeqb       m6, m7        ; !(max(abs..) > thresh)
    mova  m_maskres, m6         ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)

    mova          m6, m4        ; keep copies of p0/q0 around for later use
    psubusb       m1, m4        ; p0-q0
    psubusb       m6, m3        ; q0-p0
    por           m1, m6        ; abs(q0-p0)
    paddusb       m1, m1        ; m1=2*abs(q0-p0)

    psubusb       m7, m5        ; p1-q1
    psubusb       m6, m2        ; q1-p1
    por           m7, m6        ; abs(q1-p1)
    psrlq         m7, 1         ; abs(q1-p1)/2
    paddusb       m7, m1        ; abs(q0-p0)*2+abs(q1-p1)/2
    pcmpeqb       m7, m6        ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand          m0, m7        ; normal_limit result

    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
%else ; x86-32 or mmx/mmxext
%define m_pb_80 [pb_80]
    psubsb        m1, m7        ; (signed) q0-p0
    psubsb        m6, m7        ; (signed) p1-q1
    paddsb        m7, m1        ; 3*(q0-p0)+is4tap?(p1-q1)
    paddusb       m3, m1        ; p0+f2
    paddusb       m4, m1        ; q0-f1
%if notcpuflag(mmxext)
%if notcpuflag(mmxext)
    paddusb       m5, m1        ; q1-a
    paddusb       m2, m0        ; p1+a

    movrow [dst1q+mstrideq*2], m2
    movrow [dst1q+mstrideq  ], m3
    movrow [dst1q+ strideq  ], m5
%if mmsize == 16 && %2 == 8
    movhps [dst8q+mstrideq*2], m2
    movhps [dst8q+mstrideq  ], m3
    movhps [dst8q+ strideq  ], m5

    TRANSPOSE4x4B  2, 3, 4, 5, 6
%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D     2, 3, 4, 5, dst1q, dst2q, mstrideq, strideq
    lea        dst8q, [dst8q+mstrideq+2]
    WRITE_4x4D     2, 3, 4, 5, dst1q, dst2q, dst8q, mstrideq, strideq, %2

%if %2 == 8 ; chroma
    lea        dst1q, [dst1q+ strideq*8-2]
%else ; mmsize == 16
%endmacro

INIT_MMX mmx
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v,  8
INNER_LOOPFILTER h,  8

INIT_MMX mmxext
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v,  8
INNER_LOOPFILTER h,  8

INIT_XMM sse2
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v,  8
INNER_LOOPFILTER h,  8

INIT_XMM ssse3
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v,  8
INNER_LOOPFILTER h,  8
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                            int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
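
; The macroblock-edge filter spreads a weighted correction over three pixel
; pairs; the pb_27_63/pb_18_63/pb_9_63 constants above implement the weights
; and rounding of this sketch (w is the clipped common filter value; the
; "interleave with 1" comment below produces the +63 rounding via pmaddubsw):
;
;   a0 = (27 * w + 63) >> 7;   q0 -= a0;   p0 += a0;
;   a1 = (18 * w + 63) >> 7;   q1 -= a1;   p1 += a1;
;   a2 = ( 9 * w + 63) >> 7;   q2 -= a2;   p2 += a2;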
%macro MBEDGE_LOOPFILTER 2
%define stack_size 0
%ifndef m8       ; stack layout: [0]=E, [1]=I, [2]=hev_thr
%if mmsize == 16 ;               [3]=hev() result
                 ;               [4]=filter tmp result
                 ;               [5]/[6] = p2/q2 backup
                 ;               [7]=lim_res sign result
%define stack_size mmsize * -7
%else ; 8        ; extra storage space for transposes
%define stack_size mmsize * -8
%endif
%endif

%if %2 == 8 ; chroma
cglobal vp8_%1_loop_filter8uv_mbedge, 6, 6, 15, stack_size, dst1, dst8, stride, flimE, flimI, hevthr
%else ; luma
cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE, flimI, hevthr
%endif

    ; splat function arguments
    SPLATB_REG    m0, flimEq, m7     ; E
    SPLATB_REG    m1, flimIq, m7     ; I
    SPLATB_REG    m2, hevthrq, m7    ; hev_thresh

%define m_flimE    [rsp]
%define m_flimI    [rsp+mmsize]
%define m_hevthr   [rsp+mmsize*2]
%define m_maskres  [rsp+mmsize*3]
%define m_limres   [rsp+mmsize*4]
%define m_p0backup [rsp+mmsize*3]
%define m_q0backup [rsp+mmsize*4]
%define m_p2backup [rsp+mmsize*5]
%define m_q2backup [rsp+mmsize*6]
%define m_limsign  [rsp]
%define m_limsign  [rsp+mmsize*7]
%else ; sse2 on x86-64
%define m_hevthr   m11
%define m_maskres  m12
%define m_p0backup m12
%define m_q0backup m8
%define m_p2backup m13
%define m_q2backup m14
%define m_limsign  m9

    ; splat function arguments
    SPLATB_REG m_flimE, flimEq, m7   ; E
    SPLATB_REG m_flimI, flimIq, m7   ; I
    SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh

%if %2 == 8 ; chroma
    DEFINE_ARGS dst1, dst8, mstride, stride, dst2
    DEFINE_ARGS dst1, mstride, stride, dst2, cntr
    DEFINE_ARGS dst1, mstride, stride, dst2, dst8
    mov      strideq, mstrideq
    lea        dst1q, [dst1q+strideq*4-4]
%if %2 == 8 ; chroma
    lea        dst8q, [dst8q+strideq*4-4]

    lea        dst2q, [dst1q+ strideq]
%if %2 == 8 && mmsize == 16
    movrow        m0, [dst1q+mstrideq*4] ; p3
    movrow        m1, [dst2q+mstrideq*4] ; p2
    movrow        m2, [dst1q+mstrideq*2] ; p1
    movrow        m5, [dst2q]            ; q1
    movrow        m6, [dst2q+ strideq]   ; q2
    movrow        m7, [dst2q+ strideq*2] ; q3
%if mmsize == 16 && %2 == 8
    movhps        m0, [dst8q+mstrideq*4]
    movhps        m2, [dst8q+mstrideq*2]
    movhps        m1, [dst8q+mstrideq*4]
    movhps        m6, [dst8q+ strideq]
    movhps        m7, [dst8q+ strideq*2]
%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu          m0, [dst1q+mstrideq*4]
    movu          m1, [dst2q+mstrideq*4]
    movu          m2, [dst1q+mstrideq*2]
    movu          m3, [dst1q+mstrideq]
    movu          m6, [dst2q+ strideq]
    TRANSPOSE4x4B  0, 1, 2, 3, 7
    movu          m7, [dst2q+ strideq*2]
    TRANSPOSE4x4B  4, 5, 6, 7, 1
    SBUTTERFLY    dq, 0, 4, 1   ; p3/p2
    SBUTTERFLY    dq, 2, 6, 1   ; q0/q1
    SBUTTERFLY    dq, 3, 7, 1   ; q2/q3
    mova m_q0backup, m2         ; store q0
    SBUTTERFLY    dq, 1, 5, 2   ; p1/p0
    mova m_p0backup, m5         ; store p0

    lea        dst8q, [dst1q+ strideq*8]

    ; read 16 rows of 8px each, interleave
    movh          m0, [dst1q+mstrideq*4]
    movh          m1, [dst8q+mstrideq*4]
    movh          m2, [dst1q+mstrideq*2]
    movh          m5, [dst8q+mstrideq*2]
    movh          m3, [dst1q+mstrideq]
    movh          m6, [dst8q+mstrideq]
    punpcklbw     m0, m1        ; A/I
    punpcklbw     m2, m5        ; C/K
    punpcklbw     m3, m6        ; D/L
    punpcklbw     m4, m7        ; E/M
    movh          m1, [dst2q+mstrideq*4]
    movh          m6, [dst8q+mstrideq*4]
    punpcklbw     m1, m6        ; B/J
    punpcklbw     m5, m7        ; F/N
    movh          m6, [dst2q+ strideq]
    movh          m7, [dst8q+ strideq]
    punpcklbw     m6, m7        ; G/O
    TRANSPOSE4x4B  0, 1, 2, 3, 7
    movh          m7, [dst2q+ strideq*2]
    movh          m1, [dst8q+ strideq*2]
    punpcklbw     m7, m1        ; H/P
    TRANSPOSE4x4B  4, 5, 6, 7, 1
    SBUTTERFLY    dq, 0, 4, 1   ; p3/p2
    SBUTTERFLY    dq, 2, 6, 1   ; q0/q1
    SBUTTERFLY    dq, 3, 7, 1   ; q2/q3
    mova m_q0backup, m2         ; store q0
    SBUTTERFLY    dq, 1, 5, 2   ; p1/p0
    mova m_p0backup, m5         ; store p0

    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
    psubusb       m4, m0        ; p2-p3
    psubusb       m0, m1        ; p3-p2
    por           m0, m4        ; abs(p3-p2)

    psubusb       m4, m1        ; p1-p2
    psubusb       m1, m2        ; p2-p1
    por           m1, m4        ; abs(p2-p1)

    psubusb       m4, m7        ; q2-q3
    psubusb       m7, m6        ; q3-q2
    por           m7, m4        ; abs(q3-q2)

    psubusb       m4, m6        ; q1-q2
    psubusb       m6, m5        ; q2-q1
    por           m6, m4        ; abs(q2-q1)

%if notcpuflag(mmxext)
    pcmpeqb       m0, m3        ; abs(p3-p2) <= I
    pcmpeqb       m1, m3        ; abs(p2-p1) <= I
    pcmpeqb       m7, m3        ; abs(q3-q2) <= I
    pcmpeqb       m6, m3        ; abs(q2-q1) <= I

    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP           7, 3         ; now m7 is zero
    movrow        m3, [dst1q+mstrideq] ; p0
%if mmsize == 16 && %2 == 8
    movhps        m3, [dst8q+mstrideq]

    psubusb       m1, m3        ; p1-p0
    psubusb       m6, m2        ; p0-p1
    por           m1, m6        ; abs(p1-p0)
%if notcpuflag(mmxext)
    psubusb       m6, m_hevthr
    pcmpeqb       m1, m7        ; abs(p1-p0) <= I
    pcmpeqb       m6, m7        ; abs(p1-p0) <= hev_thresh
    pmaxub        m0, m1        ; max_I
    SWAP           1, 4         ; max_hev_thresh
    SWAP           6, 4         ; now m6 is I

    movrow        m4, [dst1q]   ; q0
%if mmsize == 16 && %2 == 8
    psubusb       m1, m5        ; q0-q1
    psubusb       m7, m4        ; q1-q0
    por           m1, m7        ; abs(q1-q0)
%if notcpuflag(mmxext)
    psubusb       m7, m_hevthr
    pcmpeqb       m1, m6        ; abs(q1-q0) <= I
    pcmpeqb       m7, m6        ; abs(q1-q0) <= hev_thresh
    pand          m0, m1        ; abs([pq][321]-[pq][210]) <= I
    psubusb       m6, m_hevthr
    pcmpeqb       m0, m7        ; max(abs(..)) <= I
    pcmpeqb       m6, m7        ; !(max(abs..) > thresh)
    mova  m_maskres, m6         ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)

    mova          m6, m4        ; keep copies of p0/q0 around for later use
    psubusb       m1, m4        ; p0-q0
    psubusb       m6, m3        ; q0-p0
    por           m1, m6        ; abs(q0-p0)
    paddusb       m1, m1        ; m1=2*abs(q0-p0)

    psubusb       m7, m5        ; p1-q1
    psubusb       m6, m2        ; q1-p1
    por           m7, m6        ; abs(q1-p1)
    psrlq         m7, 1         ; abs(q1-p1)/2
    paddusb       m7, m1        ; abs(q0-p0)*2+abs(q1-p1)/2
    pcmpeqb       m7, m6        ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand          m0, m7        ; normal_limit result

    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
%else ; x86-32 or mmx/mmxext
%define m_pb_80 [pb_80]
    psubsb        m1, m7        ; (signed) q0-p0
    psubsb        m6, m7        ; (signed) p1-q1
    mova    m_limres, m6        ; 3*(q0-p0)+(p1-q1) masked for filter_mbedge
    pandn         m7, m6        ; 3*(q0-p0)+(p1-q1) masked for filter_common
    paddusb       m3, m1        ; p0+f2
    paddusb       m4, m1        ; q0-f1

    ; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)
    pcmpgtb       m0, m1        ; which are negative
    punpcklbw     m6, m7        ; interleave with "1" for rounding
    punpcklbw     m6, m0        ; signed byte->word
    SWAP           0, 10        ; don't lose lim_sign copy
    mova  m_maskres, m6         ; backup for later in filter
    packsswb      m6, m1        ; a0
    mova          m6, [pb_18_63]  ; pipelining
    paddusb       m3, m0        ; p0+a0
    psubusb       m4, m0        ; q0-a0
    packsswb      m6, m1        ; a1
    paddusb       m2, m0        ; p1+a1
    psubusb       m5, m0        ; q1-a1
    packsswb      m6, m1        ; a2
    paddusb       m1, m7        ; p2+a2
    psubusb       m6, m7        ; q2-a2

    movrow [dst2q+mstrideq*4], m1
    movrow [dst1q+mstrideq*2], m2
    movrow [dst1q+mstrideq  ], m3
    movrow [dst2q+ strideq  ], m6
%if mmsize == 16 && %2 == 8
    movhps [dst8q+mstrideq*2], m1
    movhps [dst8q+mstrideq  ], m2
    movhps [dst8q+ strideq  ], m5
    movhps [dst8q+ strideq*2], m6

    TRANSPOSE4x4B  1, 2, 3, 4, 0
    SBUTTERFLY    bw, 5, 6, 0
%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D     1, 2, 3, 4, dst1q, dst2q, mstrideq, strideq
    WRITE_2x4W    m5, m6, dst2q, dst1q, mstrideq, strideq
    lea        dst8q, [dst8q+mstrideq+1]
    WRITE_4x4D     1, 2, 3, 4, dst1q, dst2q, dst8q, mstrideq, strideq, %2
    lea        dst1q, [dst2q+mstrideq+4]
    lea        dst8q, [dst8q+mstrideq+4]
    WRITE_8W      m5, dst2q, dst1q, mstrideq, strideq
    lea        dst2q, [dst8q+ strideq]
    WRITE_8W      m6, dst2q, dst8q, mstrideq, strideq

%if %2 == 8 ; chroma
    lea        dst1q, [dst1q+ strideq*8-5]
%else ; mmsize == 16
%endmacro

INIT_MMX mmx
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v,  8
MBEDGE_LOOPFILTER h,  8

INIT_MMX mmxext
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v,  8
MBEDGE_LOOPFILTER h,  8

INIT_XMM sse2
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v,  8
MBEDGE_LOOPFILTER h,  8

INIT_XMM ssse3
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v,  8
MBEDGE_LOOPFILTER h,  8

INIT_XMM sse4
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER h,  8