/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file util_altivec.h
 * Contains misc utility macros and inline functions
 */

#ifndef FFMPEG_UTIL_ALTIVEC_H
#define FFMPEG_UTIL_ALTIVEC_H

#include <stdint.h>

#include "config.h"

#ifdef HAVE_ALTIVEC_H
#include <altivec.h>
#endif

// used to build register permutation vectors (vcprm)
// the 's' are for words in the _s_econd vector
#define WORD_0 0x00,0x01,0x02,0x03
#define WORD_1 0x04,0x05,0x06,0x07
#define WORD_2 0x08,0x09,0x0a,0x0b
#define WORD_3 0x0c,0x0d,0x0e,0x0f
#define WORD_s0 0x10,0x11,0x12,0x13
#define WORD_s1 0x14,0x15,0x16,0x17
#define WORD_s2 0x18,0x19,0x1a,0x1b
#define WORD_s3 0x1c,0x1d,0x1e,0x1f

#define vcprm(a,b,c,d) (const vector unsigned char)AVV(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
#define vcii(a,b,c,d) (const vector float)AVV(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)

// vcprmle is used to keep the same index as in the SSE version.
// it's the same as vcprm, with the indices reversed
// ('le' is Little Endian)
#define vcprmle(a,b,c,d) vcprm(d,c,b,a)
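
// Illustrative sketch (hypothetical vectors a, b, r): a vcprm() control vector
// selects 32-bit words for vec_perm(), with plain indices taking words from
// the first operand and 's'-prefixed indices from the second. Assuming
// a = {a0,a1,a2,a3} and b = {b0,b1,b2,b3}:
//
//     vector float a, b, r;
//     r = vec_perm(a, b, vcprm(0, 1, s2, s3));
//     // r now holds { a0, a1, b2, b3 }
//
// Likewise vcprmle(3, 2, 1, 0) expands to vcprm(0, 1, 2, 3), so SSE-style
// little-endian shuffle indices can be reused unchanged.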

// used to build inverse/identity vectors (vcii)
// n is _n_egative, p is _p_ositive
#define FLOAT_n -1.
#define FLOAT_p 1.
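
// Illustrative sketch (hypothetical vector v): vcii() expands to a constant
// vector of +1.0/-1.0 floats, useful for selective sign flips, e.g. together
// with vec_madd() and a zero accumulator:
//
//     // negate the 2nd and 4th element of v, keep the 1st and 3rd
//     v = vec_madd(v, vcii(p, n, p, n), (vector float)vec_splat_u32(0));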

// Transpose 8x8 matrix of 16-bit elements (in-place)
#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
do { \
    vector signed short A1, B1, C1, D1, E1, F1, G1, H1; \
    vector signed short A2, B2, C2, D2, E2, F2, G2, H2; \
 \
    A1 = vec_mergeh (a, e); \
    B1 = vec_mergel (a, e); \
    C1 = vec_mergeh (b, f); \
    D1 = vec_mergel (b, f); \
    E1 = vec_mergeh (c, g); \
    F1 = vec_mergel (c, g); \
    G1 = vec_mergeh (d, h); \
    H1 = vec_mergel (d, h); \
 \
    A2 = vec_mergeh (A1, E1); \
    B2 = vec_mergel (A1, E1); \
    C2 = vec_mergeh (B1, F1); \
    D2 = vec_mergel (B1, F1); \
    E2 = vec_mergeh (C1, G1); \
    F2 = vec_mergel (C1, G1); \
    G2 = vec_mergeh (D1, H1); \
    H2 = vec_mergel (D1, H1); \
 \
    a = vec_mergeh (A2, E2); \
    b = vec_mergel (A2, E2); \
    c = vec_mergeh (B2, F2); \
    d = vec_mergel (B2, F2); \
    e = vec_mergeh (C2, G2); \
    f = vec_mergel (C2, G2); \
    g = vec_mergeh (D2, H2); \
    h = vec_mergel (D2, H2); \
} while (0)
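
// Illustrative sketch (hypothetical registers r0..r7): the eight rows of an
// 8x8 block of 16-bit elements sit in eight vector registers and are
// transposed in place, so afterwards r0 holds the original first column:
//
//     vector signed short r0, r1, r2, r3, r4, r5, r6, r7;
//     // ... load the eight rows of the block into r0..r7 ...
//     TRANSPOSE8(r0, r1, r2, r3, r4, r5, r6, r7);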

/** \brief loads unaligned vector \a *src with offset \a offset
    and returns it */
static inline vector unsigned char unaligned_load(int offset, uint8_t *src)
{
    // load the two aligned quadwords that straddle src+offset, then shift the
    // wanted 16 bytes into place using the control vector from vec_lvsl()
    register vector unsigned char first = vec_ld(offset, src);
    register vector unsigned char second = vec_ld(offset+15, src);
    register vector unsigned char mask = vec_lvsl(offset, src);
    return vec_perm(first, second, mask);
}
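
// Illustrative sketch (hypothetical pointer pixels): reading 16 bytes that
// start at an arbitrary, possibly misaligned address, which a plain vec_ld()
// cannot do directly since it truncates the address to a 16-byte boundary:
//
//     uint8_t *pixels;                       // need not be 16-byte aligned
//     vector unsigned char block = unaligned_load(0, pixels);
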
#endif /* FFMPEG_UTIL_ALTIVEC_H */