;*****************************************************************************
;* MMX/SSE2-optimized H.264 iDCT
;*****************************************************************************
;* Copyright (C) 2004-2005 Michael Niedermayer, Loren Merritt
;* Copyright (C) 2003-2008 x264 project
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <hal@duncan.ol.sub.de>
;*          Min Chen <chenm001.163.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************

%include "libavutil/x86/x86util.asm"
; FIXME this table is a duplicate from h264data.h, and will be removed once the tables from h264 have been split
scan8_mem: db  4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
           db  6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
           db  4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
           db  6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
           db  4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
           db  6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
           db  4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
           db  6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
           db  4+11*8, 5+11*8, 4+12*8, 5+12*8
           db  6+11*8, 7+11*8, 6+12*8, 7+12*8
           db  4+13*8, 5+13*8, 4+14*8, 5+14*8
           db  6+13*8, 7+13*8, 6+14*8, 7+14*8

%define scan8 scan8_mem
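
; scan8[i] is the position of 4x4 block i inside the 8-entry-wide
; non-zero-count cache (the nnzc[6*8] argument below): the 16 luma blocks
; first, then the blocks of the two chroma planes. A sketch of how the
; callers below use it (assumption, drawn from the scalar C decoder):
;   movzx r6, byte [scan8+r5]   ; cache position of 4x4 block r5
;   movzx r6, byte [r4+r6]      ; its non-zero count; zero means the block
;                               ; is empty and the idct is skipped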
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
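; Rough scalar equivalent of the two IDCT4_1D passes below (a sketch of the
; standard H.264 4x4 inverse transform as in the C reference, not the exact
; instruction flow):
;   z0 = b0 + b2          z1 = b0 - b2
;   z2 = (b1 >> 1) - b3   z3 = b1 + (b3 >> 1)
;   out = { z0+z3, z1+z2, z1-z2, z0-z3 }   ; applied to rows, then columns
; STORE_DIFFx2 then does dst[x] = clip_uint8(dst[x] + ((res + 32) >> 6)).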
    IDCT4_1D w, 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    IDCT4_1D w, 0, 1, 2, 3, 4, 5
    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, %1, %3
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, %1, %3

; ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_add_8, 3, 3, 0
    SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
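    ; (note: SWAP only renumbers the register aliases at assembly time via
    ;  x86inc; it emits no instructions)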

%macro IDCT8_1D_FULL 1
    IDCT8_1D [%1], [%1+ 64]

; %1=int16_t *block, %2=int16_t *dstblock
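; (assumption, inferred from the paired calls further down: the MMX version
;  handles the 8x8 block as two 4-column halves; each IDCT8_ADD_MMX_START
;  runs the first 1-D pass on one half and transposes it into the stack
;  scratch at %2, then IDCT8_ADD_MMX_END runs the second pass from that
;  scratch and adds the result to the destination)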
%macro IDCT8_ADD_MMX_START 2
    TRANSPOSE4x4W 0, 1, 2, 3, 7
    TRANSPOSE4x4W 4, 5, 6, 7, 3

; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_MMX_END 3
    STORE_DIFFx2 m0, m1, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m2, m3, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m4, m0, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m1, m2, m5, m6, m7, 6, %1, %3

; ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 0
%assign pad 128+4-(stack_offset&7)
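; (note, an assumption about the pad value: the 128 bytes of stack reserved
;  here hold the 8x8 block of int16 intermediates between the two passes,
;  and the +4-(stack_offset&7) term appears to keep that scratch 8-byte
;  aligned)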
    IDCT8_ADD_MMX_START r1, rsp
    IDCT8_ADD_MMX_START r1+8, rsp+64
    IDCT8_ADD_MMX_END r0, rsp, r2
    IDCT8_ADD_MMX_END r3, rsp+8, r2

; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_SSE 4
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%2], [%2+16]
    IDCT8_1D [%2], [%2+ 16]
    STORE_DIFF m0, m6, m7, [%1     ]
    STORE_DIFF m1, m6, m7, [%1+%3  ]
    STORE_DIFF m2, m6, m7, [%1+%3*2]
    STORE_DIFF m3, m6, m7, [%1+%4  ]
    STORE_DIFF m4, m6, m7, [%1     ]
    STORE_DIFF m5, m6, m7, [%1+%3  ]
    STORE_DIFF m0, m6, m7, [%1+%3*2]
    STORE_DIFF m1, m6, m7, [%1+%4  ]

; ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 10
    IDCT8_ADD_SSE r0, r1, r2, r3

%macro DC_ADD_MMXEXT_INIT 2-3

%macro DC_ADD_MMXEXT_OP 4
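; (scalar reference for the DC-only path, a sketch assuming the behaviour of
;  the C fallback ff_h264_idct_dc_add_8_c:
;    dc = (block[0] + 32) >> 6;
;    dst[x] = clip_uint8(dst[x] + dc);   for every pixel of the block
;  INIT computes and broadcasts the DC, OP applies it to four rows at a time)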

; ff_h264_idct_dc_add_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_dc_add_8, 3, 3, 0
    DC_ADD_MMXEXT_INIT r1, r2
    DC_ADD_MMXEXT_OP movh, r0, r2, r1

; ff_h264_idct8_dc_add_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 3, 3, 0
    DC_ADD_MMXEXT_INIT r1, r2
    DC_ADD_MMXEXT_OP mova, r0, r2, r1
    DC_ADD_MMXEXT_OP mova, r0, r2, r1

; ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset,
;                        int16_t *block, int stride, const uint8_t nnzc[6*8])
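; (rough C equivalent of the loop below, a sketch mirroring the scalar
;  ff_h264_idct_add16_c; idct4_add stands for the IDCT4_ADD macro above:
;    for (i = 0; i < 16; i++)
;        if (nnzc[scan8[i]])
;            idct4_add(dst + block_offset[i], block + i * 16, stride);
; )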
cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    lea picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov r6d, dword [r1+r5*4]

; ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset,
;                        int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct8_add4_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
%assign pad 128+4-(stack_offset&7)
    lea picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_START r2, rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END r6, rsp, r3
    mov r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_END r6, rsp+8, r3

; ff_h264_idct_add16_mmxext(uint8_t *dst, const int *block_offset,
;                           int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    DC_ADD_MMXEXT_INIT r2, r3, r6
    mov dst2d, dword [r1+r5*4]
    lea dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
    mov r6d, dword [r1+r5*4]

; ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset,
;                             int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16intra_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    lea picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov r6d, dword [r1+r5*4]

; ff_h264_idct_add16intra_mmxext(uint8_t *dst, const int *block_offset,
;                                int16_t *block, int stride,
;                                const uint8_t nnzc[6*8])
cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov r6d, dword [r1+r5*4]
    DC_ADD_MMXEXT_INIT r2, r3, r6
    mov dst2d, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6

; ff_h264_idct8_add4_mmxext(uint8_t *dst, const int *block_offset,
;                           int16_t *block, int stride,
;                           const uint8_t nnzc[6*8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
%assign pad 128+4-(stack_offset&7)
    lea picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    DC_ADD_MMXEXT_INIT r2, r3, r6
    mov dst2d, dword [r1+r5*4]
    lea dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    mov r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_START r2, rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END r6, rsp, r3
    mov r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_END r6, rsp+8, r3

; ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset,
;                         int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    DC_ADD_MMXEXT_INIT r2, r3, r6
    mov dst2d, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    mov dst2d, dword [r1+r5*4]
    IDCT8_ADD_SSE dst2q, r2, r3, r6

h264_idct_add8_mmx_plane:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov r0d, dword [r1+r5*4]
    mov r0, r1m ; XXX r1m here is actually r0m of the calling func
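    ; (presumably because this helper is entered via a plain call, the pushed
    ;  return address shifts the stack-argument offsets by one gprsize, so the
    ;  r1m slot seen here resolves to the caller's first stack argument)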
    add r0, dword [r1+r5*4]

; ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset,
;                       int16_t *block, int stride, const uint8_t nnzc[6*8])
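; (a sketch of what this entry point does, assuming it mirrors the scalar
;  ff_h264_idct_add8_c: dest is an array of the two chroma plane pointers,
;  and the plane helper above is run once per plane over that plane's 4x4
;  blocks, again skipping blocks whose nnzc[scan8[i]] entry is zero)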
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea picregq, [scan8_mem]
    call h264_idct_add8_mmx_plane
    call h264_idct_add8_mmx_plane

h264_idct_add8_mmxext_plane:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov r0d, dword [r1+r5*4]
    mov r0, r1m ; XXX r1m here is actually r0m of the calling func
    add r0, dword [r1+r5*4]
    DC_ADD_MMXEXT_INIT r2, r3, r6
    mov r0d, dword [r1+r5*4]
    mov r0, r1m ; XXX r1m here is actually r0m of the calling func
    add r0, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP movh, r0, r3, r6

; ff_h264_idct_add8_mmxext(uint8_t **dest, const int *block_offset,
;                          int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea picregq, [scan8_mem]
    call h264_idct_add8_mmxext_plane
    call h264_idct_add8_mmxext_plane

; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6=clobbered
h264_idct_dc_add8_mmxext:
    movd      m0, [r2   ]        ;  0 0 X D
    punpcklwd m0, [r2+32]        ;  x X d D
    punpcklwd m0, m0             ;  d d D D
    pxor      m1, m1             ;  0 0 0 0
    psubw     m1, m0             ; -d-d-D-D
    packuswb  m0, m1             ; -d-d-D-D d d D D
    pshufw    m1, m0, 0xFA       ; -d-d-d-d-D-D-D-D
    punpcklwd m0, m0             ;  d d d d D D D D
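    ; (note: the block holds the DCs of two horizontally adjacent 4x4 blocks,
    ;  at [r2] and [r2+32]; roughly, m0 ends up with the saturated positive
    ;  DCs and m1 with their saturated negations, so the unsigned-saturating
    ;  add/sub pair in DC_ADD_MMXEXT_OP applies a signed DC with clipping)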
    DC_ADD_MMXEXT_OP movq, r0, r3, r6

; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride
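; (this helper appears to transform two horizontally adjacent 4x4 blocks, an
;  8x4 pixel area, in one go: each xmm register holds the matching rows of
;  both blocks, and TRANSPOSE2x4x4W transposes the two 4x4 halves
;  independently between the passes)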
h264_add8x4_idct_sse2:
    IDCT4_1D w, 0, 1, 2, 3, 4, 5
    TRANSPOSE2x4x4W 0, 1, 2, 3, 4
    IDCT4_1D w, 0, 1, 2, 3, 4, 5
    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, r0, r3
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, r0, r3

%macro add16_sse2_cycle 2
    movzx r0, word [r4+%2]
    mov r0d, dword [r1+%1*8]
    call h264_add8x4_idct_sse2
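; (assumption about the add16_sse2_cycle parameters: %1 is the index of a
;  pair of 4x4 blocks and %2 the scan8 offset of the first block of that
;  pair, so the 16-bit load from nnzc tests both blocks' non-zero flags at
;  once; e.g. 0xc in the invocations below is scan8[0])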

; ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset,
;                         int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8
    ; unrolling of the loop leads to an average performance gain of
    add16_sse2_cycle 0, 0xc
    add16_sse2_cycle 1, 0x14
    add16_sse2_cycle 2, 0xe
    add16_sse2_cycle 3, 0x16
    add16_sse2_cycle 4, 0x1c
    add16_sse2_cycle 5, 0x24
    add16_sse2_cycle 6, 0x1e
    add16_sse2_cycle 7, 0x26

%macro add16intra_sse2_cycle 2
    movzx r0, word [r4+%2]
    mov r0d, dword [r1+%1*8]
    call h264_add8x4_idct_sse2
    mov r0d, dword [r1+%1*8]
    call h264_idct_dc_add8_mmxext

; ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset,
;                              int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8
    add16intra_sse2_cycle 0, 0xc
    add16intra_sse2_cycle 1, 0x14
    add16intra_sse2_cycle 2, 0xe
    add16intra_sse2_cycle 3, 0x16
    add16intra_sse2_cycle 4, 0x1c
    add16intra_sse2_cycle 5, 0x24
    add16intra_sse2_cycle 6, 0x1e
    add16intra_sse2_cycle 7, 0x26

%macro add8_sse2_cycle 2
    movzx r0, word [r4+%2]
    mov r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    call h264_add8x4_idct_sse2
    mov r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    call h264_idct_dc_add8_mmxext

; ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset,
;                        int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add8_8, 5, 7 + ARCH_X86_64, 8
    add8_sse2_cycle 0, 0x34
    add8_sse2_cycle 1, 0x3c
    add8_sse2_cycle 2, 0x5c
    add8_sse2_cycle 3, 0x64

;void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul)
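; (scalar sketch of what follows, assuming it matches the C reference
;  ff_h264_luma_dc_dequant_idct_c: a 4x4 Hadamard transform of the 16 luma DC
;  coefficients (each pair of SUMSUB_BADC lines is one 1-D Walsh butterfly),
;  followed by output = (dc * qmul + 128) >> 8 for each coefficient, which
;  STORE_WORDS scatters back to the DC position of each 4x4 block)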
    SUMSUB_BADC w, %4, %3, %2, %1, %5
    SUMSUB_BADC w, %4, %2, %3, %1, %5

%macro STORE_WORDS 5-9

%macro DEQUANT_STORE 1
    STORE_WORDS xmm0, 0, 1, 4, 5, 2, 3, 6, 7
    STORE_WORDS xmm2, 8, 9, 12, 13, 10, 11, 14, 15
    DEQUANT_MMX m0, m1, %1
    STORE_WORDS m0, 0, 1, 4, 5
    STORE_WORDS m1, 2, 3, 6, 7
    DEQUANT_MMX m2, m3, %1
    STORE_WORDS m2, 8, 9, 12, 13
    STORE_WORDS m3, 10, 11, 14, 15

%macro IDCT_DC_DEQUANT 1
cglobal h264_luma_dc_dequant_idct, 3, 4, %1
    ; manually spill XMM registers for Win64 because
    ; the code here is initialized with INIT_MMX
    TRANSPOSE4x4W 0, 1, 2, 3, 4

; shift, tmp, output, qmul
    DECLARE_REG_TMP 0, 3, 1, 2
    ; we can't avoid this, because r0 is the shift register (ecx) on win64
    DECLARE_REG_TMP 3, 1, 0, 2
    DECLARE_REG_TMP 1, 3, 0, 2