;* [extraction artifact, kept as comments] commit: "lavfi: switch to AVFrame."
;* source: FFMpeg-mirror/mplayer-patches.git / libavcodec/x86/h264_idct.asm
;* blob: de0de244286bd527f08af6957cec8e8437918165
1 ;*****************************************************************************
2 ;* MMX/SSE2-optimized H.264 iDCT
3 ;*****************************************************************************
4 ;* Copyright (C) 2004-2005 Michael Niedermayer, Loren Merritt
5 ;* Copyright (C) 2003-2008 x264 project
6 ;*
7 ;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
8 ;* Loren Merritt <lorenm@u.washington.edu>
9 ;* Holger Lubitz <hal@duncan.ol.sub.de>
10 ;* Min Chen <chenm001.163.com>
12 ;* This file is part of Libav.
14 ;* Libav is free software; you can redistribute it and/or
15 ;* modify it under the terms of the GNU Lesser General Public
16 ;* License as published by the Free Software Foundation; either
17 ;* version 2.1 of the License, or (at your option) any later version.
19 ;* Libav is distributed in the hope that it will be useful,
20 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
21 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 ;* Lesser General Public License for more details.
24 ;* You should have received a copy of the GNU Lesser General Public
25 ;* License along with Libav; if not, write to the Free Software
26 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 ;*****************************************************************************
29 %include "libavutil/x86/x86util.asm"
SECTION_RODATA

; FIXME this table is a duplicate from h264data.h, and will be removed once the tables from, h264 have been split
; scan8_mem maps a 4x4-block index to its byte offset inside the nnzc[6*8]
; non-zero-count cache; each entry is encoded as x + y*8.
scan8_mem: db  4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
           db  6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
           db  4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
           db  6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
           db  4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
           db  6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
           db  4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
           db  6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
           db  4+11*8, 5+11*8, 4+12*8, 5+12*8
           db  6+11*8, 7+11*8, 6+12*8, 7+12*8
           db  4+13*8, 5+13*8, 4+14*8, 5+14*8
           db  6+13*8, 7+13*8, 6+14*8, 7+14*8

%ifdef PIC
%define npicregs 1           ; PIC: one extra register needed to address the table
%define scan8 picregq
%else
%define npicregs 0
%define scan8 scan8_mem      ; non-PIC: address the table directly
%endif

cextern pw_32                ; 8 x int16 {32}: idct rounding constant
cextern pw_1                 ; 8 x int16 {1}:  used by the pmaddwd dequant trick

SECTION .text
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
; 4x4 iDCT of %2, result added (with clamping, via STORE_DIFFx2) to the
; 4x4 pixel block at %1.  Clobbers %1 and mm0-mm7.
%macro IDCT4_ADD 3
    ; Load dct coeffs
    movq         m0, [%2]
    movq         m1, [%2+8]
    movq         m2, [%2+16]
    movq         m3, [%2+24]

    IDCT4_1D      w, 0, 1, 2, 3, 4, 5      ; column pass
    mova         m6, [pw_32]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    paddw        m0, m6                    ; +32 rounding before the final >>6
    IDCT4_1D      w, 0, 1, 2, 3, 4, 5      ; row pass
    pxor         m7, m7                    ; zero reg for STORE_DIFFx2 unpacking

    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, %1, %3
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, %1, %3
%endmacro
INIT_MMX mmx
; ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
; Single 4x4 idct+add, 8-bit depth.
cglobal h264_idct_add_8, 3, 3, 0
    IDCT4_ADD r0, r1, r2
    RET
; One 8-point 1-D pass of the H.264 8x8 iDCT.
; On entry m1,m2,m3,m5,m6,m7 hold input rows 1,2,3,5,6,7 (as loaded by
; IDCT8_1D_FULL); %1 and %2 are memory operands holding rows 0 and 4.
; The eight output rows end up in m0..m7 in natural order (see final SWAP).
%macro IDCT8_1D 2
    ; odd half: combine rows 1,3,5,7 into four intermediates
    mova         m0, m1
    psraw        m1, 1
    mova         m4, m5
    psraw        m4, 1
    paddw        m4, m5
    paddw        m1, m0
    paddw        m4, m7
    paddw        m1, m5
    psubw        m4, m0
    paddw        m1, m3

    psubw        m0, m3
    psubw        m5, m3
    psraw        m3, 1
    paddw        m0, m7
    psubw        m5, m7
    psraw        m7, 1
    psubw        m0, m3
    psubw        m5, m7

    ; odd half, second stage (the >>2 cross terms)
    mova         m7, m1
    psraw        m1, 2
    mova         m3, m4
    psraw        m3, 2
    paddw        m3, m0
    psraw        m0, 2
    paddw        m1, m5
    psraw        m5, 2
    psubw        m0, m4
    psubw        m7, m5

    ; even half: rows 2 and 6
    mova         m5, m6
    psraw        m6, 1
    mova         m4, m2
    psraw        m4, 1
    paddw        m6, m2
    psubw        m4, m5

    ; butterfly even and odd halves together; rows 0/4 loaded from memory here
    mova         m2, %1
    mova         m5, %2
    SUMSUB_BA w, 5, 2
    SUMSUB_BA w, 6, 5
    SUMSUB_BA w, 4, 2
    SUMSUB_BA w, 7, 6
    SUMSUB_BA w, 0, 4
    SUMSUB_BA w, 3, 2
    SUMSUB_BA w, 1, 5
    SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
%endmacro
; Load rows 1,2,3,5,6,7 of the 8x8 int16 coefficient block at %1 into
; m1..m3,m5..m7 and run IDCT8_1D with rows 0 and 4 taken straight from memory.
%macro IDCT8_1D_FULL 1
    mova         m7, [%1+112]      ; row 7
    mova         m6, [%1+ 96]      ; row 6
    mova         m5, [%1+ 80]      ; row 5
    mova         m3, [%1+ 48]      ; row 3
    mova         m2, [%1+ 32]      ; row 2
    mova         m1, [%1+ 16]      ; row 1
    IDCT8_1D   [%1], [%1+ 64]      ; rows 0 and 4 as memory operands
%endmacro
; %1=int16_t *block, %2=int16_t *dstblock
; First (column) pass of the mmx 8x8 idct on one 4-column half, transposed
; out to the scratch buffer %2 as two 4x4 transposes.  m7 is spilled to [%1]
; because only 8 mm registers are available.
%macro IDCT8_ADD_MMX_START 2
    IDCT8_1D_FULL %1
    mova       [%1], m7            ; spill row 7 to make room for the transpose temp
    TRANSPOSE4x4W 0, 1, 2, 3, 7
    mova         m7, [%1]
    mova    [%2   ], m0
    mova    [%2+16], m1
    mova    [%2+32], m2
    mova    [%2+48], m3
    TRANSPOSE4x4W 4, 5, 6, 7, 3
    mova    [%2+ 8], m4
    mova    [%2+24], m5
    mova    [%2+40], m6
    mova    [%2+56], m7
%endmacro
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
; Second (row) pass for one 4-column half coming out of IDCT8_ADD_MMX_START,
; then add+clamp into the destination.  m5..m7 are spilled into the scratch
; area to free registers for STORE_DIFFx2.  Advances %1 down eight rows.
%macro IDCT8_ADD_MMX_END 3
    IDCT8_1D_FULL %2
    mova    [%2   ], m5
    mova    [%2+16], m6
    mova    [%2+32], m7

    pxor         m7, m7            ; zero reg for STORE_DIFFx2 unpacking
    STORE_DIFFx2 m0, m1, m5, m6, m7, 6, %1, %3
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m2, m3, m5, m6, m7, 6, %1, %3
    mova         m0, [%2   ]       ; reload the spilled rows
    mova         m1, [%2+16]
    mova         m2, [%2+32]
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m4, m0, m5, m6, m7, 6, %1, %3
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m1, m2, m5, m6, m7, 6, %1, %3
%endmacro
INIT_MMX mmx
; ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
; 8x8 idct+add done as two 4-column halves through a 128-byte stack scratch.
cglobal h264_idct8_add_8, 3, 4, 0
    ; 128 bytes of scratch plus enough padding to 8-align it regardless of
    ; the incoming stack_offset
    %assign pad 128+4-(stack_offset&7)
    SUB         rsp, pad

    add   word [r1], 32            ; fold the +32 rounding into the DC coeff
    IDCT8_ADD_MMX_START r1  , rsp
    IDCT8_ADD_MMX_START r1+8, rsp+64
    lea          r3, [r0+4]        ; right half of the destination
    IDCT8_ADD_MMX_END   r0  , rsp,   r2
    IDCT8_ADD_MMX_END   r3  , rsp+8, r2

    ADD         rsp, pad
    RET                            ; restored: RET was dropped in this copy
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
; Full 8x8 idct+add in sse2; %4 is a scratch GPR that receives stride*3.
; On x86-32 the transpose and second pass spill through [%2]; on x86-64
; xmm8/xmm9 are used instead (hence the 10-xmm requirement of the caller).
%macro IDCT8_ADD_SSE 4
    IDCT8_1D_FULL %2
%if ARCH_X86_64
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%else
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%2], [%2+16]
%endif
    paddw        m0, [pw_32]       ; rounding before the final >>6

%if ARCH_X86_64 == 0
    mova    [%2   ], m0
    mova    [%2+16], m4
    IDCT8_1D   [%2], [%2+ 16]
    mova    [%2   ], m6
    mova    [%2+16], m7
%else
    SWAP          0, 8
    SWAP          4, 9
    IDCT8_1D     m8, m9
    SWAP          6, 8
    SWAP          7, 9
%endif

    pxor         m7, m7
    lea          %4, [%3*3]        ; stride*3 for the 4th row of each group
    STORE_DIFF   m0, m6, m7, [%1     ]
    STORE_DIFF   m1, m6, m7, [%1+%3  ]
    STORE_DIFF   m2, m6, m7, [%1+%3*2]
    STORE_DIFF   m3, m6, m7, [%1+%4  ]
%if ARCH_X86_64 == 0
    mova         m0, [%2   ]       ; reload spilled rows 6/7
    mova         m1, [%2+16]
%else
    SWAP          0, 8
    SWAP          1, 9
%endif
    lea          %1, [%1+%3*4]
    STORE_DIFF   m4, m6, m7, [%1     ]
    STORE_DIFF   m5, m6, m7, [%1+%3  ]
    STORE_DIFF   m0, m6, m7, [%1+%3*2]
    STORE_DIFF   m1, m6, m7, [%1+%4  ]
%endmacro
INIT_XMM sse2
; ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 10
    IDCT8_ADD_SSE r0, r1, r2, r3
    RET                            ; restored: RET was dropped in this copy
; Prepare a DC-only add: compute dc = (DC + 32) >> 6, then leave
;   m0 = bytewise-packed max(dc, 0)   and   m1 = bytewise-packed max(-dc, 0)
; so DC_ADD_MMXEXT_OP can apply the signed dc with a saturated
; paddusb/psubusb pair.
; 2-arg form: %1 = int16_t *block (clobbered, ends as stride*3), %2 = stride.
; 3-arg form: %3 already holds the raw DC (clobbered, ends as stride*3).
%macro DC_ADD_MMXEXT_INIT 2-3
%if %0 == 2
    movsx        %1, word [%1]     ; dc = block[0]
    add          %1, 32
    sar          %1, 6             ; dc = (dc + 32) >> 6
    movd         m0, %1d
    lea          %1, [%2*3]        ; reuse the reg as stride*3 for the OP macro
%else
    add          %3, 32
    sar          %3, 6
    movd         m0, %3d
    lea          %3, [%2*3]
%endif
    pshufw       m0, m0, 0         ; broadcast dc to all 4 words
    pxor         m1, m1
    psubw        m1, m0            ; m1 = -dc
    packuswb     m0, m0            ; clamp  dc to [0,255] per byte
    packuswb     m1, m1            ; clamp -dc to [0,255] per byte
%endmacro
; Apply the broadcast DC prepared by DC_ADD_MMXEXT_INIT (m0 = +dc, m1 = -dc)
; to four rows of pixels.  %1 = load/store mnemonic (movh/mova/movq),
; %2 = dst, %3 = stride, %4 = stride*3.  Adding +dc and subtracting -dc with
; unsigned saturation implements a clamped signed add.
%macro DC_ADD_MMXEXT_OP 4
    %1           m2, [%2     ]
    %1           m3, [%2+%3  ]
    %1           m4, [%2+%3*2]
    %1           m5, [%2+%4  ]
    paddusb      m2, m0
    paddusb      m3, m0
    paddusb      m4, m0
    paddusb      m5, m0
    psubusb      m2, m1
    psubusb      m3, m1
    psubusb      m4, m1
    psubusb      m5, m1
    %1   [%2     ], m2
    %1   [%2+%3  ], m3
    %1   [%2+%3*2], m4
    %1   [%2+%4  ], m5
%endmacro
INIT_MMX mmxext
; ff_h264_idct_dc_add_mmxext(uint8_t *dst, int16_t *block, int stride)
; DC-only 4x4 idct+add (movh = 4-byte rows).
cglobal h264_idct_dc_add_8, 3, 3, 0
    DC_ADD_MMXEXT_INIT r1, r2      ; r1 ends up holding stride*3
    DC_ADD_MMXEXT_OP movh, r0, r2, r1
    RET                            ; restored: RET was dropped in this copy
; ff_h264_idct8_dc_add_mmxext(uint8_t *dst, int16_t *block, int stride)
; DC-only 8x8 idct+add: two 4-row passes of 8-byte rows.
cglobal h264_idct8_dc_add_8, 3, 3, 0
    DC_ADD_MMXEXT_INIT r1, r2      ; r1 ends up holding stride*3
    DC_ADD_MMXEXT_OP mova, r0, r2, r1
    lea          r0, [r0+r2*4]     ; lower half of the 8x8 block
    DC_ADD_MMXEXT_OP mova, r0, r2, r1
    RET                            ; restored: RET was dropped in this copy
INIT_MMX mmx
; ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset,
;                        int16_t *block, int stride, const uint8_t nnzc[6*8])
; Loop over the 16 4x4 luma blocks, running IDCT4_ADD only on blocks whose
; non-zero-coefficient count (nnzc, indexed through scan8) is non-zero.
cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    xor r5, r5                     ; r5 = block counter 0..15
%ifdef PIC
    lea picregq, [scan8_mem]
%endif
.nextblock:
    movzx r6, byte [scan8+r5]      ; r6 = cache offset of block r5
    movzx r6, byte [r4+r6]         ; r6 = nnzc for this block
    test r6, r6
    jz .skipblock
    mov r6d, dword [r1+r5*4]       ; r6 = block_offset[r5]
    lea r6, [r0+r6]                ; destination of this block
    IDCT4_ADD r6, r2, r3
.skipblock:
    inc r5
    add r2, 32                     ; 16 coeffs = 32 bytes per 4x4 block
    cmp r5, 16
    jl .nextblock
    REP_RET
; ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset,
;                        int16_t *block, int stride, const uint8_t nnzc[6*8])
; Loop over the four 8x8 blocks (scan8 step of 4), skipping all-zero ones.
cglobal h264_idct8_add4_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    ; 128-byte idct scratch, padded to stay 8-aligned for any stack_offset
    %assign pad 128+4-(stack_offset&7)
    SUB rsp, pad

    xor r5, r5                     ; r5 = 4x4-block counter, stepped by 4
%ifdef PIC
    lea picregq, [scan8_mem]
%endif
.nextblock:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]         ; nnzc of this 8x8 block
    test r6, r6
    jz .skipblock
    mov r6d, dword [r1+r5*4]
    add r6, r0                     ; destination of this 8x8 block
    add word [r2], 32              ; fold the +32 rounding into the DC
    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp, r3
    mov r6d, dword [r1+r5*4]
    lea r6, [r0+r6+4]              ; right half
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3
.skipblock:
    add r5, 4
    add r2, 128                    ; 64 coeffs = 128 bytes per 8x8 block
    cmp r5, 16
    jl .nextblock
    ADD rsp, pad
    RET                            ; restored: RET was dropped in this copy
INIT_MMX mmxext
; ff_h264_idct_add16_mmxext(uint8_t *dst, const int *block_offset,
;                           int16_t *block, int stride, const uint8_t nnzc[6*8])
; Like the mmx version, but a block with nnzc == 1 whose non-zero coeff is
; the DC takes the cheap DC-only path instead of a full idct.
cglobal h264_idct_add16_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    xor r5, r5                     ; r5 = block counter 0..15
%ifdef PIC
    lea picregq, [scan8_mem]
%endif
.nextblock:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]         ; r6 = nnzc for block r5
    test r6, r6
    jz .skipblock
    cmp r6, 1
    jnz .no_dc                     ; more than one coeff: full idct
    movsx r6, word [r2]
    test r6, r6
    jz .no_dc                      ; the single coeff is not the DC: full idct
    DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
    mov dst2d, dword [r1+r5*4]     ; dst2 = dst + block_offset[r5]
    lea dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
    mov r1, r1m                    ; restore r1 (aliased to dst2 on x86-32)
%endif
    inc r5
    add r2, 32
    cmp r5, 16
    jl .nextblock
    REP_RET
.no_dc:
    mov r6d, dword [r1+r5*4]
    add r6, r0
    IDCT4_ADD r6, r2, r3
.skipblock:
    inc r5
    add r2, 32                     ; 32 bytes of coeffs per 4x4 block
    cmp r5, 16
    jl .nextblock
    REP_RET
INIT_MMX mmx
; ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset,
;                             int16_t *block, int stride, const uint8_t nnzc[6*8])
; Intra variant: a block is processed if either its nnzc entry or its stored
; DC coefficient is non-zero (the `or` below merges both conditions).
cglobal h264_idct_add16intra_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    xor r5, r5                     ; r5 = block counter 0..15
%ifdef PIC
    lea picregq, [scan8_mem]
%endif
.nextblock:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]         ; nnzc for this block
    or r6w, word [r2]              ; also non-zero if only the DC is set
    test r6, r6
    jz .skipblock
    mov r6d, dword [r1+r5*4]
    add r6, r0
    IDCT4_ADD r6, r2, r3
.skipblock:
    inc r5
    add r2, 32
    cmp r5, 16
    jl .nextblock
    REP_RET
INIT_MMX mmxext
; ff_h264_idct_add16intra_mmxext(uint8_t *dst, const int *block_offset,
;                                int16_t *block, int stride,
;                                const uint8_t nnzc[6*8])
; Intra variant with a DC-only fast path: nnzc != 0 -> full idct; otherwise,
; if the DC coeff alone is set, add just the DC.
cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    xor r5, r5                     ; r5 = block counter 0..15
%ifdef PIC
    lea picregq, [scan8_mem]
%endif
.nextblock:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]         ; nnzc for this block
    test r6, r6
    jz .try_dc                     ; no counted coeffs: maybe DC-only
    mov r6d, dword [r1+r5*4]
    lea r6, [r0+r6]
    IDCT4_ADD r6, r2, r3
    inc r5
    add r2, 32
    cmp r5, 16
    jl .nextblock
    REP_RET
.try_dc:
    movsx r6, word [r2]
    test r6, r6
    jz .skipblock                  ; DC is zero too: nothing to do
    DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
    mov dst2d, dword [r1+r5*4]
    add dst2q, r0
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
    mov r1, r1m                    ; restore r1 (aliased to dst2 on x86-32)
%endif
.skipblock:
    inc r5
    add r2, 32
    cmp r5, 16
    jl .nextblock
    REP_RET
; ff_h264_idct8_add4_mmxext(uint8_t *dst, const int *block_offset,
;                           int16_t *block, int stride,
;                           const uint8_t nnzc[6*8])
; 8x8 blocks with a DC-only fast path (nnzc == 1 and DC != 0).
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    ; 128-byte idct scratch, padded to stay 8-aligned for any stack_offset
    %assign pad 128+4-(stack_offset&7)
    SUB rsp, pad

    xor r5, r5                     ; r5 = 4x4-block counter, stepped by 4
%ifdef PIC
    lea picregq, [scan8_mem]
%endif
.nextblock:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]         ; nnzc of this 8x8 block
    test r6, r6
    jz .skipblock
    cmp r6, 1
    jnz .no_dc                     ; more than one coeff: full idct
    movsx r6, word [r2]
    test r6, r6
    jz .no_dc                      ; the single coeff is not the DC
    DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
    mov dst2d, dword [r1+r5*4]
    lea dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea dst2q, [dst2q+r3*4]        ; lower half of the 8x8 block
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
    mov r1, r1m                    ; restore r1 (aliased to dst2 on x86-32)
%endif
    add r5, 4
    add r2, 128
    cmp r5, 16
    jl .nextblock

    ADD rsp, pad
    RET                            ; restored: RET was dropped in this copy
.no_dc:
    mov r6d, dword [r1+r5*4]
    add r6, r0
    add word [r2], 32              ; fold the +32 rounding into the DC
    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp, r3
    mov r6d, dword [r1+r5*4]
    lea r6, [r0+r6+4]              ; right half
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3
.skipblock:
    add r5, 4
    add r2, 128                    ; 128 bytes of coeffs per 8x8 block
    cmp r5, 16
    jl .nextblock

    ADD rsp, pad
    RET                            ; restored: RET was dropped in this copy
INIT_XMM sse2
; ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset,
;                         int16_t *block, int stride, const uint8_t nnzc[6*8])
; DC-only blocks reuse the mmxext DC path (hence the temporary INIT_MMX
; cpuname below); everything else goes through the full sse2 8x8 idct.
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    xor r5, r5                     ; r5 = 4x4-block counter, stepped by 4
%ifdef PIC
    lea picregq, [scan8_mem]
%endif
.nextblock:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]         ; nnzc of this 8x8 block
    test r6, r6
    jz .skipblock
    cmp r6, 1
    jnz .no_dc
    movsx r6, word [r2]
    test r6, r6
    jz .no_dc
INIT_MMX cpuname
    DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
    mov dst2d, dword [r1+r5*4]
    add dst2q, r0
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea dst2q, [dst2q+r3*4]        ; lower half of the 8x8 block
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
    mov r1, r1m                    ; restore r1 (aliased to dst2 on x86-32)
%endif
    add r5, 4
    add r2, 128
    cmp r5, 16
    jl .nextblock
    REP_RET
.no_dc:
INIT_XMM cpuname
    mov dst2d, dword [r1+r5*4]
    add dst2q, r0
    IDCT8_ADD_SSE dst2q, r2, r3, r6
%if ARCH_X86_64 == 0
    mov r1, r1m
%endif
.skipblock:
    add r5, 4
    add r2, 128                    ; 128 bytes of coeffs per 8x8 block
    cmp r5, 16
    jl .nextblock
    REP_RET
INIT_MMX mmx
; Helper: idct+add four consecutive chroma 4x4 blocks of one plane.
; In: r5 = scan8 index of the first block, r2 = coeffs, r3 = stride, r4 = nnzc;
;     the plane pointer is fetched from dst2q (x86-64) or the caller's dest
;     array (x86-32) — see the callers.  Clobbers r0 and r6.
h264_idct_add8_mmx_plane:
.nextblock:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    or r6w, word [r2]              ; also run when only the DC coeff is set
    test r6, r6
    jz .skipblock
%if ARCH_X86_64
    mov r0d, dword [r1+r5*4]
    add r0, [dst2q]                ; dereference the uint8_t **dest slot
%else
    mov r0, r1m ; XXX r1m here is actually r0m of the calling func
    mov r0, [r0]
    add r0, dword [r1+r5*4]
%endif
    IDCT4_ADD r0, r2, r3
.skipblock:
    inc r5
    add r2, 32
    test r5, 3                     ; stop once r5 reaches the next multiple of 4
    jnz .nextblock
    rep ret
; ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset,
;                       int16_t *block, int stride, const uint8_t nnzc[6*8])
; Chroma: run the 4-block plane helper once per plane (scan8 indices 16.. and
; 32..), advancing the dest pointer-array slot between the two calls.
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    mov r5, 16                     ; scan8 index of the first chroma block
    add r2, 512                    ; skip the 16 luma coeff blocks (16*32 bytes)
%ifdef PIC
    lea picregq, [scan8_mem]
%endif
%if ARCH_X86_64
    mov dst2q, r0                  ; keep a pointer into the dest array
%endif
    call h264_idct_add8_mmx_plane  ; first chroma plane
    mov r5, 32
    add r2, 384
%if ARCH_X86_64
    add dst2q, gprsize             ; next dest pointer
%else
    add r0mp, gprsize
%endif
    call h264_idct_add8_mmx_plane  ; second chroma plane
    RET                            ; restored: RET was dropped in this copy
; Helper: idct+add four consecutive chroma 4x4 blocks of one plane, with a
; DC-only fast path when nnzc is 0 but the DC coefficient is set.
; Same register contract as h264_idct_add8_mmx_plane.
h264_idct_add8_mmxext_plane:
.nextblock:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    test r6, r6
    jz .try_dc                     ; no counted coeffs: maybe DC-only
%if ARCH_X86_64
    mov r0d, dword [r1+r5*4]
    add r0, [dst2q]                ; dereference the uint8_t **dest slot
%else
    mov r0, r1m ; XXX r1m here is actually r0m of the calling func
    mov r0, [r0]
    add r0, dword [r1+r5*4]
%endif
    IDCT4_ADD r0, r2, r3
    inc r5
    add r2, 32
    test r5, 3
    jnz .nextblock
    rep ret
.try_dc:
    movsx r6, word [r2]
    test r6, r6
    jz .skipblock                  ; DC is zero too: nothing to add
    DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64
    mov r0d, dword [r1+r5*4]
    add r0, [dst2q]
%else
    mov r0, r1m ; XXX r1m here is actually r0m of the calling func
    mov r0, [r0]
    add r0, dword [r1+r5*4]
%endif
    DC_ADD_MMXEXT_OP movh, r0, r3, r6
.skipblock:
    inc r5
    add r2, 32
    test r5, 3                     ; stop once r5 reaches the next multiple of 4
    jnz .nextblock
    rep ret
INIT_MMX mmxext
; ff_h264_idct_add8_mmxext(uint8_t **dest, const int *block_offset,
;                          int16_t *block, int stride, const uint8_t nnzc[6*8])
; Chroma: as the mmx version, but through the mmxext plane helper.
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    mov r5, 16                     ; scan8 index of the first chroma block
    add r2, 512                    ; skip the 16 luma coeff blocks
%if ARCH_X86_64
    mov dst2q, r0                  ; keep a pointer into the dest array
%endif
%ifdef PIC
    lea picregq, [scan8_mem]
%endif
    call h264_idct_add8_mmxext_plane  ; first chroma plane
    mov r5, 32
    add r2, 384
%if ARCH_X86_64
    add dst2q, gprsize             ; next dest pointer
%else
    add r0mp, gprsize
%endif
    call h264_idct_add8_mmxext_plane  ; second chroma plane
    RET                            ; restored: RET was dropped in this copy
; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6=clobbered
; Add the DCs of two horizontally adjacent 4x4 blocks (coeff blocks at r2
; and r2+32) to an 8x4 pixel region in one pass.
h264_idct_dc_add8_mmxext:
    movd         m0, [r2   ]          ;  0 0 X D
    punpcklwd    m0, [r2+32]          ;  x X d D
    paddsw       m0, [pw_32]          ;  rounding before >>6
    psraw        m0, 6
    punpcklwd    m0, m0               ;  d d D D
    pxor         m1, m1               ;  0 0 0 0
    psubw        m1, m0               ;  -d-d-D-D
    packuswb     m0, m1               ;  -d-d-D-D d d D D
    pshufw       m1, m0, 0xFA         ;  -d-d-d-d-D-D-D-D
    punpcklwd    m0, m0               ;  d d d d D D D D
    lea          r6, [r3*3]           ;  stride*3 for DC_ADD_MMXEXT_OP
    DC_ADD_MMXEXT_OP movq, r0, r3, r6
    ret                               ; restored: ret was dropped in this copy
ALIGN 16
INIT_XMM sse2
; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride
; idct+add of two horizontally adjacent 4x4 blocks at once: the low halves of
; the xmm registers carry the block at r2, the high halves the block at r2+32.
h264_add8x4_idct_sse2:
    movq   m0, [r2+ 0]
    movq   m1, [r2+ 8]
    movq   m2, [r2+16]
    movq   m3, [r2+24]
    movhps m0, [r2+32]
    movhps m1, [r2+40]
    movhps m2, [r2+48]
    movhps m3, [r2+56]
    IDCT4_1D w,0,1,2,3,4,5         ; column pass on both blocks
    TRANSPOSE2x4x4W 0,1,2,3,4
    paddw  m0, [pw_32]             ; rounding before the final >>6
    IDCT4_1D w,0,1,2,3,4,5         ; row pass
    pxor   m7, m7
    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, r0, r3
    lea    r0, [r0+r3*2]
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, r0, r3
    ret                            ; restored: ret was dropped in this copy
; One unrolled iteration of idct_add16 (sse2): %1 = index of an 8x4 pair of
; 4x4 blocks, %2 = byte offset of this pair's entries in nnzc[] (scan8 order).
%macro add16_sse2_cycle 2
    movzx r0, word [r4+%2]         ; test both blocks' nnzc bytes at once
    test r0, r0
    jz .cycle%1end
    mov r0d, dword [r1+%1*8]       ; block_offset of the first of the pair
%if ARCH_X86_64
    add r0, r5                     ; r5 = saved dst base
%else
    add r0, r0m
%endif
    call h264_add8x4_idct_sse2
.cycle%1end:
%if %1 < 7
    add r2, 64                     ; two 4x4 coeff blocks = 64 bytes
%endif
%endmacro
; ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset,
;                         int16_t *block, int stride, const uint8_t nnzc[6*8])
; The 16 luma blocks are handled as 8 pairs through add16_sse2_cycle; the
; second macro argument is each pair's scan8-derived nnzc byte offset.
cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8
%if ARCH_X86_64
    mov r5, r0                     ; save dst base across helper calls
%endif
    ; unrolling of the loop leads to an average performance gain of
    ; 20-25%
    add16_sse2_cycle 0, 0xc
    add16_sse2_cycle 1, 0x14
    add16_sse2_cycle 2, 0xe
    add16_sse2_cycle 3, 0x16
    add16_sse2_cycle 4, 0x1c
    add16_sse2_cycle 5, 0x24
    add16_sse2_cycle 6, 0x1e
    add16_sse2_cycle 7, 0x26
    RET                            ; restored: RET was dropped in this copy
; One unrolled iteration of idct_add16intra (sse2): %1 = pair index (two 4x4
; blocks), %2 = byte offset of this pair's nnzc[] entries (scan8 order).
; If neither block has counted coeffs, a set DC in either block still
; triggers the DC-only helper.
%macro add16intra_sse2_cycle 2
    movzx r0, word [r4+%2]         ; test both blocks' nnzc bytes at once
    test r0, r0
    jz .try%1dc
    mov r0d, dword [r1+%1*8]
%if ARCH_X86_64
    add r0, r7                     ; r7 = saved dst base
%else
    add r0, r0m
%endif
    call h264_add8x4_idct_sse2
    jmp .cycle%1end
.try%1dc:
    movsx r0, word [r2 ]
    or r0w, word [r2+32]           ; DC of either block set?
    jz .cycle%1end
    mov r0d, dword [r1+%1*8]
%if ARCH_X86_64
    add r0, r7
%else
    add r0, r0m
%endif
    call h264_idct_dc_add8_mmxext
.cycle%1end:
%if %1 < 7
    add r2, 64                     ; two 4x4 coeff blocks = 64 bytes
%endif
%endmacro
; ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset,
;                              int16_t *block, int stride, const uint8_t nnzc[6*8])
; Intra variant: 8 unrolled pair-iterations with a DC-only fallback per pair.
cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8
%if ARCH_X86_64
    mov r7, r0                     ; save dst base across helper calls
%endif
    add16intra_sse2_cycle 0, 0xc
    add16intra_sse2_cycle 1, 0x14
    add16intra_sse2_cycle 2, 0xe
    add16intra_sse2_cycle 3, 0x16
    add16intra_sse2_cycle 4, 0x1c
    add16intra_sse2_cycle 5, 0x24
    add16intra_sse2_cycle 6, 0x1e
    add16intra_sse2_cycle 7, 0x26
    RET                            ; restored: RET was dropped in this copy
; One unrolled iteration of idct_add8 (chroma, sse2): %1 = pair index,
; %2 = nnzc[] byte offset.  dest is a uint8_t** here, so the plane pointer
; is loaded indirectly through [r7] (x86-64) or [r0m] (x86-32).
%macro add8_sse2_cycle 2
    movzx r0, word [r4+%2]         ; test both blocks' nnzc bytes at once
    test r0, r0
    jz .try%1dc
%if ARCH_X86_64
    mov r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add r0, [r7]                   ; dereference the dest pointer slot
%else
    mov r0, r0m
    mov r0, [r0]
    add r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
%endif
    call h264_add8x4_idct_sse2
    jmp .cycle%1end
.try%1dc:
    movsx r0, word [r2 ]
    or r0w, word [r2+32]           ; DC of either block set?
    jz .cycle%1end
%if ARCH_X86_64
    mov r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add r0, [r7]
%else
    mov r0, r0m
    mov r0, [r0]
    add r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
%endif
    call h264_idct_dc_add8_mmxext
.cycle%1end:
%if %1 == 1
    add r2, 384+64                 ; jump to the second plane's coeff blocks
%elif %1 < 3
    add r2, 64
%endif
%endmacro
; ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset,
;                        int16_t *block, int stride, const uint8_t nnzc[6*8])
; Chroma: two pair-iterations per plane, stepping the dest pointer slot
; between planes.
cglobal h264_idct_add8_8, 5, 7 + ARCH_X86_64, 8
    add r2, 512                    ; skip the 16 luma coeff blocks
%if ARCH_X86_64
    mov r7, r0                     ; keep a pointer into the dest array
%endif
    add8_sse2_cycle 0, 0x34
    add8_sse2_cycle 1, 0x3c
%if ARCH_X86_64
    add r7, gprsize                ; next dest pointer (second plane)
%else
    add r0mp, gprsize
%endif
    add8_sse2_cycle 2, 0x5c
    add8_sse2_cycle 3, 0x64
    RET                            ; restored: RET was dropped in this copy
;void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul)

; 4-point 1-D Walsh-Hadamard transform on four registers (%5 = temp).
%macro WALSH4_1D 5
    SUMSUB_BADC w, %4, %3, %2, %1, %5
    SUMSUB_BADC w, %4, %2, %3, %1, %5
    SWAP %1, %4, %3
%endmacro
; mmx helper for DEQUANT_STORE: dequantize two mm registers of coefficients.
; Interleaving each coeff with pw_1 makes pmaddwd compute c*qmul + rnd per
; lane, where t3d packs qmul in its low word and the rounding term in its
; high word (see IDCT_DC_DEQUANT); results are shifted right by %3 and
; repacked to int16.
%macro DEQUANT_MMX 3
    mova        m7, [pw_1]
    mova        m4, %1
    punpcklwd   %1, m7             ; (c, 1) pairs, low half
    punpckhwd   m4, m7             ; (c, 1) pairs, high half
    mova        m5, %2
    punpcklwd   %2, m7
    punpckhwd   m5, m7
    movd        m7, t3d            ; (qmul | rnd<<16)
    punpckldq   m7, m7             ; broadcast to both dwords
    pmaddwd     %1, m7             ; c*qmul + rnd
    pmaddwd     %2, m7
    pmaddwd     m4, m7
    pmaddwd     m5, m7
    psrad       %1, %3
    psrad       %2, %3
    psrad       m4, %3
    psrad       m5, %3
    packssdw    %1, m4
    packssdw    %2, m5
%endmacro
; Scatter the 16-bit lanes of %1 to the output array: lane k is stored at
; [t2 + idx*32], i.e. 32 bytes (one 4x4 coeff block) apart — each value lands
; in a block's DC slot.  The sse form takes 8 destination indices (%2..%9),
; the mmx form 4 (%2..%5).
%macro STORE_WORDS 5-9
%if cpuflag(sse)
    movd t0d, %1
    psrldq %1, 4
    movd t1d, %1
    psrldq %1, 4
    mov [t2+%2*32], t0w
    mov [t2+%4*32], t1w
    shr t0d, 16                    ; high word of each dword
    shr t1d, 16
    mov [t2+%3*32], t0w
    mov [t2+%5*32], t1w
    movd t0d, %1
    psrldq %1, 4
    movd t1d, %1
    mov [t2+%6*32], t0w
    mov [t2+%8*32], t1w
    shr t0d, 16
    shr t1d, 16
    mov [t2+%7*32], t0w
    mov [t2+%9*32], t1w
%else
    movd t0d, %1
    psrlq %1, 32
    movd t1d, %1
    mov [t2+%2*32], t0w
    mov [t2+%4*32], t1w
    shr t0d, 16
    shr t1d, 16
    mov [t2+%3*32], t0w
    mov [t2+%5*32], t1w
%endif
%endmacro
; Dequantize the 16 Walsh-transformed luma DC coefficients held in m0..m3
; — each becomes (c*qmul + rnd) >> %1, with qmul and rnd packed in t3d (see
; IDCT_DC_DEQUANT) — and scatter them to the per-block DC slots via
; STORE_WORDS.  sse2 path widens through xmm; otherwise falls back to the
; DEQUANT_MMX helper.
%macro DEQUANT_STORE 1
%if cpuflag(sse2)
    movd      xmm4, t3d            ; (qmul | rnd<<16)
    movq      xmm5, [pw_1]
    pshufd    xmm4, xmm4, 0        ; broadcast
    movq2dq   xmm0, m0
    movq2dq   xmm1, m1
    movq2dq   xmm2, m2
    movq2dq   xmm3, m3
    punpcklwd xmm0, xmm5           ; (c, 1) pairs for pmaddwd
    punpcklwd xmm1, xmm5
    punpcklwd xmm2, xmm5
    punpcklwd xmm3, xmm5
    pmaddwd   xmm0, xmm4           ; c*qmul + rnd
    pmaddwd   xmm1, xmm4
    pmaddwd   xmm2, xmm4
    pmaddwd   xmm3, xmm4
    psrad     xmm0, %1
    psrad     xmm1, %1
    psrad     xmm2, %1
    psrad     xmm3, %1
    packssdw  xmm0, xmm1
    packssdw  xmm2, xmm3
    STORE_WORDS xmm0,  0,  1,  4,  5,  2,  3,  6,  7
    STORE_WORDS xmm2,  8,  9, 12, 13, 10, 11, 14, 15
%else
    DEQUANT_MMX m0, m1, %1
    STORE_WORDS m0,  0,  1,  4,  5
    STORE_WORDS m1,  2,  3,  6,  7

    DEQUANT_MMX m2, m3, %1
    STORE_WORDS m2,  8,  9, 12, 13
    STORE_WORDS m3, 10, 11, 14, 15
%endif
%endmacro
; Emit ff_h264_luma_dc_dequant_idct for the current instruction set:
; 4x4 inverse Walsh-Hadamard of the 16 luma DC coeffs, then dequantize
; ((c*qmul + 128) >> 8) and scatter each result to its block's DC slot.
; %1 = number of xmm registers to declare/spill (0 for plain mmx).
%macro IDCT_DC_DEQUANT 1
cglobal h264_luma_dc_dequant_idct, 3, 4, %1
; manually spill XMM registers for Win64 because
; the code here is initialized with INIT_MMX
    WIN64_SPILL_XMM %1
    movq        m3, [r1+24]
    movq        m2, [r1+16]
    movq        m1, [r1+ 8]
    movq        m0, [r1+ 0]
    WALSH4_1D    0,1,2,3,4         ; column pass
    TRANSPOSE4x4W 0,1,2,3,4
    WALSH4_1D    0,1,2,3,4         ; row pass

; shift, tmp, output, qmul
%if WIN64
    DECLARE_REG_TMP 0,3,1,2
    ; we can't avoid this, because r0 is the shift register (ecx) on win64
    xchg        r0, t2
%elif ARCH_X86_64
    DECLARE_REG_TMP 3,1,0,2
%else
    DECLARE_REG_TMP 1,3,0,2
%endif

    cmp        t3d, 32767
    jg .big_qmul                   ; qmul too wide for a 16-bit pmaddwd lane
    add        t3d, 128 << 16      ; pack the rounding term into the high word
    DEQUANT_STORE 8                ; (c*qmul + 128) >> 8
    RET                            ; restored: must not fall through to .big_qmul
.big_qmul:
    bsr        t0d, t3d            ; log2 of qmul
    add        t3d, 128 << 16
    mov        t1d, 7
    cmp        t0d, t1d
    cmovg      t0d, t1d            ; t0 = min(bsr, 7)
    inc        t1d
    shr        t3d, t0b            ; pre-shift qmul so it fits in 16 bits
    sub        t1d, t0d            ; remaining right-shift = 8 - t0
%if cpuflag(sse2)
    movd      xmm6, t1d
    DEQUANT_STORE xmm6
%else
    movd        m6, t1d
    DEQUANT_STORE m6
%endif
    RET                            ; restored: RET was dropped in this copy
%endmacro
INIT_MMX mmx
IDCT_DC_DEQUANT 0
; NOTE: "INIT_MMX sse2" (not INIT_XMM) is deliberate here — the macro body
; works on mm registers and only DEQUANT_STORE switches to xmm when
; cpuflag(sse2) is set.
INIT_MMX sse2
IDCT_DC_DEQUANT 7