/*
 * GMC (Global Motion Compensation)
 *
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "dsputil_altivec.h"

/*
  altivec-enhanced gmc1. ATM this code assumes stride is a multiple of 8,
  to preserve proper dst alignment.
*/
void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
                     int stride, int h, int x16, int y16, int rounder)
{
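    /*
     * GMC1 does a bilinear blend of four neighbouring source pixels:
     *   dst[x] = (A * src[x]          + B * src[x + 1] +
     *             C * src[x + stride] + D * src[x + stride + 1] +
     *             rounder) >> 8
     * where the weights below are built from the 1/16-pel offsets x16/y16
     * and always sum to 16 * 16 = 256, hence the final shift by 8.
     */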
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16 - x16) * (16 - y16), /* A */
        (     x16) * (16 - y16), /* B */
        (16 - x16) * (     y16), /* C */
        (     x16) * (     y16), /* D */
        0, 0, 0, 0               /* padding */
    };
    register const vector unsigned char vczero =
        (const vector unsigned char) vec_splat_u8(0);
    register const vector unsigned short vcsr8 =
        (const vector unsigned short) vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1,
                                  srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV,
                                   tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd        = (unsigned long) dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;
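
    /* Broadcast each 16-bit weight (A, B, C, D) and the rounder into all
     * eight lanes of its own vector, so one vec_mladd handles 8 pixels. */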
    tempA = vec_ld(0, (const unsigned short *) ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);

    // we'll be able to pick up our 9 char elements
    // at src from those 32 bytes
    // we load the first batch here, as inside the loop
    // we can reuse 'src + stride' from one iteration
    // as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
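    /* vec_ld can only load from 16-byte-aligned addresses, so the two
     * aligned loads plus vec_perm with the vec_lvsl shuffle mask implement
     * an unaligned load of 16 bytes starting at src (and src + 1 below). */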

    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src + 1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
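    /* vec_mergeh with vczero interleaves a zero byte before each source
     * byte, zero-extending the eight pixels to 16-bit lanes for vec_mladd. */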

    for (i = 0; i < h; i++) {
        dst_odd        = (unsigned long) dst & 0x0000000F;
        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // we'll be able to pick up our 9 char elements
        // at src + stride from those 32 bytes
        // then reuse the resulting 2 vectors srcvC and srcvD
        // as the next srcvA and srcvB
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src + 1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // these four instructions replace 32 int muls & 32 int adds.
        // isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;
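
        /* tempD now holds, per 16-bit lane, the full weighted sum
         * A*srcA + B*srcB + C*srcC + D*srcD + rounder; since the weights
         * sum to 256, shifting right by 8 yields the interpolated pixel. */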
        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);
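
        /* Only 8 output bytes are produced, but vec_st always stores a full
         * aligned 16-byte vector, so merge the result into the half of the
         * existing dst vector selected by dst_odd and store it back. */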
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}