@ ARMv4 optimized DSP utils
@ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp>
@
@ This file is part of FFmpeg.
@
@ FFmpeg is free software; you can redistribute it and/or
@ modify it under the terms of the GNU Lesser General Public
@ License as published by the Free Software Foundation; either
@ version 2.1 of the License, or (at your option) any later version.
@
@ FFmpeg is distributed in the hope that it will be useful,
@ but WITHOUT ANY WARRANTY; without even the implied warranty of
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
@ Lesser General Public License for more details.
@
@ You should have received a copy of the GNU Lesser General Public
@ License along with FFmpeg; if not, write to the Free Software
@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
function ff_prefetch_arm, export=1
.macro  ADJ_ALIGN_QUADWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4
        mov             \Rd0, \Rn0, lsr #(\shift * 8)
        mov             \Rd1, \Rn1, lsr #(\shift * 8)
        mov             \Rd2, \Rn2, lsr #(\shift * 8)
        mov             \Rd3, \Rn3, lsr #(\shift * 8)
        orr             \Rd0, \Rd0, \Rn1, lsl #(32 - \shift * 8)
        orr             \Rd1, \Rd1, \Rn2, lsl #(32 - \shift * 8)
        orr             \Rd2, \Rd2, \Rn3, lsl #(32 - \shift * 8)
        orr             \Rd3, \Rd3, \Rn4, lsl #(32 - \shift * 8)
.endm
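
@ What ADJ_ALIGN_QUADWORD_D computes, as a little-endian C sketch (d[] and
@ n[] stand for the macro's register arguments, not real registers): given
@ five consecutive words n[0..4] loaded from a word-aligned base, d[0..3]
@ receive the 16 bytes that start \shift bytes into n[0]:
@     for (i = 0; i < 4; i++)
@         d[i] = (n[i] >> (shift * 8)) | (n[i + 1] << (32 - shift * 8));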
.macro  ADJ_ALIGN_DOUBLEWORD shift, R0, R1, R2
        mov             \R0, \R0, lsr #(\shift * 8)
        orr             \R0, \R0, \R1, lsl #(32 - \shift * 8)
        mov             \R1, \R1, lsr #(\shift * 8)
        orr             \R1, \R1, \R2, lsl #(32 - \shift * 8)
.endm
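
@ In-place two-word variant of the same funnel shift: afterwards \R0/\R1
@ hold the 8 bytes that start \shift bytes into the run {\R0, \R1, \R2}.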
.macro  ADJ_ALIGN_DOUBLEWORD_D shift, Rdst0, Rdst1, Rsrc0, Rsrc1, Rsrc2
        mov             \Rdst0, \Rsrc0, lsr #(\shift * 8)
        mov             \Rdst1, \Rsrc1, lsr #(\shift * 8)
        orr             \Rdst0, \Rdst0, \Rsrc1, lsl #(32 - (\shift * 8))
        orr             \Rdst1, \Rdst1, \Rsrc2, lsl #(32 - (\shift * 8))
.endm
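
@ Non-destructive variant: the realigned doubleword lands in \Rdst0/\Rdst1
@ while \Rsrc0-\Rsrc2 stay intact, which lets the x2 loops below realign
@ the same source words at two different byte offsets.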
.macro  RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
        @ Rd = (Rn | Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1)
        @ Rmask = 0xFEFEFEFE (= ~0x01010101)
        @ Rn is clobbered
        eor             \Rd0, \Rn0, \Rm0
        eor             \Rd1, \Rn1, \Rm1
        orr             \Rn0, \Rn0, \Rm0
        orr             \Rn1, \Rn1, \Rm1
        and             \Rd0, \Rd0, \Rmask
        and             \Rd1, \Rd1, \Rmask
        sub             \Rd0, \Rn0, \Rd0, lsr #1
        sub             \Rd1, \Rn1, \Rd1, lsr #1
.endm
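
@ Byte-wise average rounding up, with no carries across byte lanes.
@ Since a + b == (a | b) + (a & b), per byte (a + b + 1) >> 1 equals
@ (a | b) - ((a ^ b) >> 1); masking the xor with 0xFEFEFEFE before the
@ shift keeps each byte's low bit from leaking into the byte below.
@ Reference sketch for one 4-pixel word:
@     uint32_t rnd_avg32(uint32_t a, uint32_t b) {
@         return (a | b) - (((a ^ b) & 0xFEFEFEFE) >> 1);
@     }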
.macro  NO_RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
        @ Rd = (Rn & Rm) + (((Rn ^ Rm) & ~0x01010101) >> 1)
        @ Rmask = 0xFEFEFEFE (= ~0x01010101)
        @ Rn is clobbered
        eor             \Rd0, \Rn0, \Rm0
        eor             \Rd1, \Rn1, \Rm1
        and             \Rn0, \Rn0, \Rm0
        and             \Rn1, \Rn1, \Rm1
        and             \Rd0, \Rd0, \Rmask
        and             \Rd1, \Rd1, \Rmask
        add             \Rd0, \Rn0, \Rd0, lsr #1
        add             \Rd1, \Rn1, \Rd1, lsr #1
.endm
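
@ Truncating counterpart: since a + b == 2 * (a & b) + (a ^ b), per byte
@ (a + b) >> 1 equals
@     uint32_t no_rnd_avg32(uint32_t a, uint32_t b) {
@         return (a & b) + (((a ^ b) & 0xFEFEFEFE) >> 1);
@     }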
.macro  JMP_ALIGN tmp, reg
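        @ Branches on \tmp = \reg & 3, the byte misalignment of the pixel
        @ pointer, to the matching code path below, word-aligning \reg so
        @ those paths can use whole-word ldmia loads plus ADJ_ALIGN_* to
        @ realign.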
@ ----------------------------------------------------------------
function put_pixels16_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
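        @ Plain-C sketch of the behaviour (a straight 16 x h byte copy):
        @     for (; h > 0; h--, block += line_size, pixels += line_size)
        @         memcpy(block, pixels, 16);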
        stmfd           sp!, {r4-r11, lr} @ R14 is also called LR
        ldmfd           sp!, {r4-r11, pc}
        ADJ_ALIGN_QUADWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8
        ldmfd           sp!, {r4-r11, pc}
        ADJ_ALIGN_QUADWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8
        ldmfd           sp!, {r4-r11, pc}
        ADJ_ALIGN_QUADWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8
        ldmfd           sp!, {r4-r11, pc}
@ ----------------------------------------------------------------
function put_pixels8_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
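        @ Same copy as put_pixels16_arm, but 8 bytes (one doubleword) per row.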
        stmfd           sp!, {r4-r5, lr} @ R14 is also called LR
        ldmfd           sp!, {r4-r5, pc}
        ldmia           r1, {r4-r5, r12}
        ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r12
        ldmfd           sp!, {r4-r5, pc}
        ldmia           r1, {r4-r5, r12}
        ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r12
        ldmfd           sp!, {r4-r5, pc}
        ldmia           r1, {r4-r5, r12}
        ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r12
        ldmfd           sp!, {r4-r5, pc}
@ ----------------------------------------------------------------
function put_pixels8_x2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
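        @ Horizontal half-pel interpolation with rounding; per byte:
        @     block[x] = (pixels[x] + pixels[x + 1] + 1) >> 1;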
        stmfd           sp!, {r4-r10, lr} @ R14 is also called LR
        ldmia           r1, {r4-r5, r10}
        ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
        RND_AVG32       r8, r9, r4, r5, r6, r7, r12
        ldmfd           sp!, {r4-r10, pc}
        ldmia           r1, {r4-r5, r10}
        ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
        ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
        RND_AVG32       r4, r5, r6, r7, r8, r9, r12
        ldmfd           sp!, {r4-r10, pc}
        ldmia           r1, {r4-r5, r10}
        ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
        ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
        RND_AVG32       r4, r5, r6, r7, r8, r9, r12
        ldmfd           sp!, {r4-r10, pc}
        ldmia           r1, {r4-r5, r10}
        ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
        RND_AVG32       r8, r9, r6, r7, r5, r10, r12
        ldmfd           sp!, {r4-r10, pc} @@ update PC with LR content.
function put_no_rnd_pixels8_x2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
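        @ As put_pixels8_x2_arm, but truncating:
        @     block[x] = (pixels[x] + pixels[x + 1]) >> 1;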
        stmfd           sp!, {r4-r10, lr} @ R14 is also called LR
        ldmia           r1, {r4-r5, r10}
        ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
        NO_RND_AVG32    r8, r9, r4, r5, r6, r7, r12
        ldmfd           sp!, {r4-r10, pc}
        ldmia           r1, {r4-r5, r10}
        ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
        ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
        NO_RND_AVG32    r4, r5, r6, r7, r8, r9, r12
        ldmfd           sp!, {r4-r10, pc}
        ldmia           r1, {r4-r5, r10}
        ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
        ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
        NO_RND_AVG32    r4, r5, r6, r7, r8, r9, r12
        ldmfd           sp!, {r4-r10, pc}
        ldmia           r1, {r4-r5, r10}
        ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
        NO_RND_AVG32    r8, r9, r6, r7, r5, r10, r12
        ldmfd           sp!, {r4-r10, pc} @@ update PC with LR content.
@ ----------------------------------------------------------------
function put_pixels8_y2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
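        @ Vertical half-pel interpolation with rounding; per byte:
        @     block[x] = (pixels[x] + pixels[x + line_size] + 1) >> 1;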
        stmfd           sp!, {r4-r11, lr} @ R14 is also called LR
        RND_AVG32       r8, r9, r4, r5, r6, r7, r12
        RND_AVG32       r8, r9, r6, r7, r4, r5, r12
        ldmfd           sp!, {r4-r11, pc}
        ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
        ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
        RND_AVG32       r10, r11, r4, r5, r7, r8, r12
        ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
        RND_AVG32       r10, r11, r7, r8, r4, r5, r12
        ldmfd           sp!, {r4-r11, pc}
        ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
        ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
        RND_AVG32       r10, r11, r4, r5, r7, r8, r12
        ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
        RND_AVG32       r10, r11, r7, r8, r4, r5, r12
        ldmfd           sp!, {r4-r11, pc}
        ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
        ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
        RND_AVG32       r10, r11, r4, r5, r7, r8, r12
        ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
        RND_AVG32       r10, r11, r7, r8, r4, r5, r12
        ldmfd           sp!, {r4-r11, pc}
function put_no_rnd_pixels8_y2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
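        @ As put_pixels8_y2_arm, but truncating:
        @     block[x] = (pixels[x] + pixels[x + line_size]) >> 1;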
        stmfd           sp!, {r4-r11, lr} @ R14 is also called LR
        NO_RND_AVG32    r8, r9, r4, r5, r6, r7, r12
        NO_RND_AVG32    r8, r9, r6, r7, r4, r5, r12
        ldmfd           sp!, {r4-r11, pc}
        ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
        ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
        NO_RND_AVG32    r10, r11, r4, r5, r7, r8, r12
        ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
        NO_RND_AVG32    r10, r11, r7, r8, r4, r5, r12
        ldmfd           sp!, {r4-r11, pc}
        ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
        ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
        NO_RND_AVG32    r10, r11, r4, r5, r7, r8, r12
        ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
        NO_RND_AVG32    r10, r11, r7, r8, r4, r5, r12
        ldmfd           sp!, {r4-r11, pc}
        ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
        ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
        NO_RND_AVG32    r10, r11, r4, r5, r7, r8, r12
        ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
        NO_RND_AVG32    r10, r11, r7, r8, r4, r5, r12
        ldmfd           sp!, {r4-r11, pc}
@ ----------------------------------------------------------------
.macro  RND_XY2_IT align, rnd
        @ l1 =  (a & 0x03030303) + (b & 0x03030303)  (+ 0x02020202 when rounding)
        @ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2)
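        @ The 2x2 average is built by splitting each byte into its low 2
        @ bits and high 6 bits: the four low parts plus the rounding
        @ constant are summed in one word (l), the four high parts, each
        @ pre-shifted right by 2, in another (h), and the result byte is
        @ h + ((l >> 2) & 3).  The \rnd argument derives the constant from
        @ the 0x03030303 mask in r14: "lsl" gives r14 & (r14 << 1) =
        @ 0x02020202 (rounded), "lsr" gives r14 & (r14 >> 1) = 0x01010101.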
        ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r6, r7, r8
        ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r8, r9, r10
        ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r8, r9, r10
        ADJ_ALIGN_DOUBLEWORD_D 2, r4, r5, r8, r9, r10
        ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r8, r9, r10
        ADJ_ALIGN_DOUBLEWORD_D 3, r4, r5, r5, r6, r7
        andeq           r14, r14, r14, \rnd #1
        ldr             r12, =0xfcfcfcfc >> 2
        and             r4, r12, r4, lsr #2
        and             r5, r12, r5, lsr #2
        and             r6, r12, r6, lsr #2
        and             r7, r12, r7, lsr #2
.endm
.macro  RND_XY2_EXPAND align, rnd
        RND_XY2_IT      \align, \rnd
6:      stmfd           sp!, {r8-r11}
        RND_XY2_IT      \align, \rnd
        and             r4, r14, r4, lsr #2
        and             r5, r14, r5, lsr #2
        ldmfd           sp!, {r4-r11, pc}
.endm
function put_pixels8_xy2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
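        @ 2x2 (half-pel in both directions) interpolation with rounding:
        @     block[x] = (pixels[x] + pixels[x + 1] + pixels[x + line_size]
        @                 + pixels[x + line_size + 1] + 2) >> 2;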
        stmfd           sp!, {r4-r11, lr} @ R14 is also called LR
        RND_XY2_EXPAND  0, lsl
        RND_XY2_EXPAND  1, lsl
        RND_XY2_EXPAND  2, lsl
        RND_XY2_EXPAND  3, lsl
function put_no_rnd_pixels8_xy2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
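        @ Truncating 2x2 variant: the "lsr" argument selects the +0x01010101
        @ constant, giving (a + b + c + d + 1) >> 2 per byte.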
        stmfd           sp!, {r4-r11, lr} @ R14 is also called LR
        RND_XY2_EXPAND  0, lsr
        RND_XY2_EXPAND  1, lsr
        RND_XY2_EXPAND  2, lsr
        RND_XY2_EXPAND  3, lsr
@ void ff_add_pixels_clamped_ARM(int16_t *block, uint8_t *dest, int stride)
function ff_add_pixels_clamped_ARM, export=1
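        @ Reference behaviour, as a plain-C sketch: add each 16-bit
        @ coefficient of the 8x8 block to the byte it overlays and clamp
        @ the sum to [0, 255]:
        @     for (y = 0; y < 8; y++, dest += stride, block += 8)
        @         for (x = 0; x < 8; x++) {
        @             int v = dest[x] + block[x];
        @             dest[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        @         }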
        ldr             r4, [r1]        /* load dest */
        /* block[0] and block[1] */
        add             r8, r7, r8, lsr #8
        movne           r6, r5, lsr #24
        movne           r8, r7, lsr #24
        ldrsh           r5, [r0, #4]    /* moved from [A] */
        orr             r9, r9, r8, lsl #8
        /* block[2] and block[3] */
        and             r6, r4, #0xFF0000
        and             r8, r4, #0xFF000000
        add             r6, r5, r6, lsr #16
        add             r8, r7, r8, lsr #24
        movne           r6, r5, lsr #24
        movne           r8, r7, lsr #24
        orr             r9, r9, r6, lsl #16
        ldr             r4, [r1, #4]    /* moved from [B] */
        orr             r9, r9, r8, lsl #24
        ldrsh           r5, [r0, #8]    /* moved from [C] */
        /* block[4] and block[5] */
        add             r8, r7, r8, lsr #8
        movne           r6, r5, lsr #24
        movne           r8, r7, lsr #24
        ldrsh           r5, [r0, #12]   /* moved from [D] */
        orr             r9, r9, r8, lsl #8
        /* block[6] and block[7] */
        and             r6, r4, #0xFF0000
        and             r8, r4, #0xFF000000
        add             r6, r5, r6, lsr #16
        add             r8, r7, r8, lsr #24
        movne           r6, r5, lsr #24
        movne           r8, r7, lsr #24
        orr             r9, r9, r6, lsl #16
        add             r0, r0, #16     /* moved from [E] */
        orr             r9, r9, r8, lsl #24
        subs            r10, r10, #1    /* moved from [F] */