/*
 * GMC (Global Motion Compensation)
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavcodec/dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_ppc.h"
#include "util_altivec.h"
/*
 * AltiVec-enhanced gmc1. At the moment this code assumes that stride is
 * a multiple of 8, to preserve proper dst alignment.
 */
#define GMC1_PERF_COND (h==8)
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
                  int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
    const DECLARE_ALIGNED_16(unsigned short, rounder_a[8]) =
        {rounder, rounder, rounder, rounder,
         rounder, rounder, rounder, rounder};
    const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) =
        {
            (16-x16)*(16-y16), /* A */
            (   x16)*(16-y16), /* B */
            (16-x16)*(   y16), /* C */
            (   x16)*(   y16), /* D */
            0, 0, 0, 0         /* padding */
        };
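    /* The four weights implement the single-warp-point GMC bilinear blend
     *     dst[x] = (A*src[x] + B*src[x+1] + C*src[x+stride]
     *               + D*src[x+stride+1] + rounder) >> 8
     * and always satisfy A + B + C + D == 256, which is why the final
     * shift further down is by 8. */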
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_ld(0, (unsigned short*)rounder_a);
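    // Each 16-bit coefficient is splatted across a whole vector, so a
    // single vec_mladd below applies it to eight pixels at once.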
    // We'll be able to pick up our 9 char elements at src from those
    // 32 bytes. We load the first batch here, as inside the loop we can
    // reuse 'src + stride' from one iteration as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
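    // vec_ld ignores the low four address bits, so the two loads above
    // fetch the 32 aligned bytes covering src; vec_lvsl builds the permute
    // mask that shifts out the 16 bytes actually starting at src.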
    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src+1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
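    // Merging with vczero zero-extends the eight leftmost bytes of srcvA
    // and srcvB into eight 16-bit lanes, ready for vec_mladd.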
    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;
        dstv = vec_ld(0, dst);
        // We'll be able to pick up our 9 char elements at src + stride
        // from those 32 bytes, then reuse the resulting two vectors
        // srcvC and srcvD as the next srcvA and srcvB.
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src+1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);
        // OK, now we (finally) do the math :-)
        // These four instructions replace 32 int muls & 32 int adds.
        // Isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

        // reuse the current line's bottom vectors as the next line's top
        srcvA = srcvC;
        srcvB = srcvD;
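        // tempD now holds A*P00 + B*P01 + C*P10 + D*P11 + rounder for
        // eight pixels, each in a 16-bit lane; since the weights sum to
        // 256, the shift right by 8 below renormalizes the result.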
        tempD = vec_sr(tempD, vcsr8);
        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
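        // vec_pack narrows the shorts back to bytes: the eight result
        // pixels now sit in the first half of dstv2, the second half
        // is zero.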
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }
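        // vec_st always writes 16 aligned bytes, so the permutes above
        // merged our 8 result bytes into the old dst contents: with dst
        // 8-aligned and stride a multiple of 8, dst & 0xF is either 0 or
        // 8, and dst_odd picks which half of the vector gets the pixels.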
        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}