/*
 * Copyright (C) 2003 James Klicman <james@klicman.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavutil/common.h"
#include "dsputil_altivec.h"

#define vs16(v) ((vector signed short)(v))
#define vs32(v) ((vector signed int)(v))
#define vu8(v)  ((vector unsigned char)(v))
#define vu16(v) ((vector unsigned short)(v))
#define vu32(v) ((vector unsigned int)(v))

#define C1     0.98078525066375732421875000 /* cos(1*PI/16) */
#define C2     0.92387950420379638671875000 /* cos(2*PI/16) */
#define C3     0.83146959543228149414062500 /* cos(3*PI/16) */
#define C4     0.70710676908493041992187500 /* cos(4*PI/16) */
#define C5     0.55557024478912353515625000 /* cos(5*PI/16) */
#define C6     0.38268342614173889160156250 /* cos(6*PI/16) */
#define C7     0.19509032368659973144531250 /* cos(7*PI/16) */
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */

#define W0 -(2 * C2)
#define W1  (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * ( C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * ( C5 - C3))

static vector float fdctconsts[3] = {
    { W0, W1, W2, W3 },
    { W4, W5, W6, W7 },
    { W8, W9, WA, WB }
};
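
/* Each row of fdctconsts is loaded into cnsts0/1/2 below; the LD_Wx
 * macros then splat one of the twelve weights across all four float
 * lanes for use as a vec_madd operand. */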

#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)

#define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */             \
    x0 = vec_add(b0, b7);             /* x0 = b0 + b7; */      \
    x7 = vec_sub(b0, b7);             /* x7 = b0 - b7; */      \
    x1 = vec_add(b1, b6);             /* x1 = b1 + b6; */      \
    x6 = vec_sub(b1, b6);             /* x6 = b1 - b6; */      \
    x2 = vec_add(b2, b5);             /* x2 = b2 + b5; */      \
    x5 = vec_sub(b2, b5);             /* x5 = b2 - b5; */      \
    x3 = vec_add(b3, b4);             /* x3 = b3 + b4; */      \
    x4 = vec_sub(b3, b4);             /* x4 = b3 - b4; */      \
                                                               \
    b7 = vec_add(x0, x3);             /* b7 = x0 + x3; */      \
    b1 = vec_add(x1, x2);             /* b1 = x1 + x2; */      \
    b0 = vec_add(b7, b1);             /* b0 = b7 + b1; */      \
    b4 = vec_sub(b7, b1);             /* b4 = b7 - b1; */      \
                                                               \
    b2   = vec_sub(x0, x3);           /* b2 = x0 - x3; */      \
    b6   = vec_sub(x1, x2);           /* b6 = x1 - x2; */      \
    b5   = vec_add(b6, b2);           /* b5 = b6 + b2; */      \
    cnst = LD_W2;                                              \
    b5   = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */      \
    cnst = LD_W1;                                              \
    b2   = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0;                                              \
    b6   = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */ \
                                                               \
    x0   = vec_add(x4, x7);           /* x0 = x4 + x7; */      \
    x1   = vec_add(x5, x6);           /* x1 = x5 + x6; */      \
    x2   = vec_add(x4, x6);           /* x2 = x4 + x6; */      \
    x3   = vec_add(x5, x7);           /* x3 = x5 + x7; */      \
    x8   = vec_add(x2, x3);           /* x8 = x2 + x3; */      \
    cnst = LD_W3;                                              \
    x8   = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */      \
                                                               \
    cnst = LD_W8;                                              \
    x0   = vec_madd(cnst, x0, mzero); /* x0 *= W8; */          \
    cnst = LD_W9;                                              \
    x1   = vec_madd(cnst, x1, mzero); /* x1 *= W9; */          \
    cnst = LD_WA;                                              \
    x2   = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB;                                              \
    x3   = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */ \
                                                               \
    cnst = LD_W4;                                              \
    b7   = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5;                                              \
    b5   = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6;                                              \
    b3   = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7;                                              \
    b1   = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */ \
                                                               \
    b7 = vec_add(b7, x2);             /* b7 = b7 + x2; */      \
    b5 = vec_add(b5, x3);             /* b5 = b5 + x3; */      \
    b3 = vec_add(b3, x2);             /* b3 = b3 + x2; */      \
    b1 = vec_add(b1, x3);             /* b1 = b1 + x3; */      \
    /* }}} */
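
/* FDCTROW above and FDCTCOL below encode the same 8-point DCT butterfly
 * network, with the weights W0..WB folded into the multiplies; since
 * each vector register holds four floats, one invocation advances four
 * 1-D transforms in parallel. */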

#define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */             \
    x0 = vec_add(b0, b7);             /* x0 = b0 + b7; */      \
    x7 = vec_sub(b0, b7);             /* x7 = b0 - b7; */      \
    x1 = vec_add(b1, b6);             /* x1 = b1 + b6; */      \
    x6 = vec_sub(b1, b6);             /* x6 = b1 - b6; */      \
    x2 = vec_add(b2, b5);             /* x2 = b2 + b5; */      \
    x5 = vec_sub(b2, b5);             /* x5 = b2 - b5; */      \
    x3 = vec_add(b3, b4);             /* x3 = b3 + b4; */      \
    x4 = vec_sub(b3, b4);             /* x4 = b3 - b4; */      \
                                                               \
    b7 = vec_add(x0, x3);             /* b7 = x0 + x3; */      \
    b1 = vec_add(x1, x2);             /* b1 = x1 + x2; */      \
    b0 = vec_add(b7, b1);             /* b0 = b7 + b1; */      \
    b4 = vec_sub(b7, b1);             /* b4 = b7 - b1; */      \
                                                               \
    b2   = vec_sub(x0, x3);           /* b2 = x0 - x3; */      \
    b6   = vec_sub(x1, x2);           /* b6 = x1 - x2; */      \
    b5   = vec_add(b6, b2);           /* b5 = b6 + b2; */      \
    cnst = LD_W2;                                              \
    b5   = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */      \
    cnst = LD_W1;                                              \
    b2   = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0;                                              \
    b6   = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */ \
                                                               \
    x0   = vec_add(x4, x7);           /* x0 = x4 + x7; */      \
    x1   = vec_add(x5, x6);           /* x1 = x5 + x6; */      \
    x2   = vec_add(x4, x6);           /* x2 = x4 + x6; */      \
    x3   = vec_add(x5, x7);           /* x3 = x5 + x7; */      \
    x8   = vec_add(x2, x3);           /* x8 = x2 + x3; */      \
    cnst = LD_W3;                                              \
    x8   = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */      \
                                                               \
    cnst = LD_W8;                                              \
    x0   = vec_madd(cnst, x0, mzero); /* x0 *= W8; */          \
    cnst = LD_W9;                                              \
    x1   = vec_madd(cnst, x1, mzero); /* x1 *= W9; */          \
    cnst = LD_WA;                                              \
    x2   = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB;                                              \
    x3   = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */ \
                                                               \
    cnst = LD_W4;                                              \
    b7   = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5;                                              \
    b5   = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6;                                              \
    b3   = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7;                                              \
    b1   = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */ \
                                                               \
    b7 = vec_add(b7, x2);             /* b7 += x2; */          \
    b5 = vec_add(b5, x3);             /* b5 += x3; */          \
    b3 = vec_add(b3, x2);             /* b3 += x2; */          \
    b1 = vec_add(b1, x3);             /* b1 += x3; */          \
    /* }}} */

/* two dimensional discrete cosine transform */
void ff_fdct_altivec(int16_t *block)
{
    vector signed short *bp;
    const vector float *cp = fdctconsts;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;
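
    /* vec_ld/vec_st ignore the low four address bits, so the 8x8 block
     * of int16_t coefficients is assumed to be 16-byte aligned, as DCT
     * blocks in Libav are. */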

    /* setup constants {{{ */
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
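    /* The two lines above build -0.0f in every lane: vec_splat_u32(-1)
     * fills each element with 0xFFFFFFFF, and vec_sl shifts it left by
     * 31 (only the low five bits of the shift count apply), leaving
     * just the sign bit -- a harmless addend for vec_madd. */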
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */

    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))

    bp  = (vector signed short*)block;
    b00 = ((vector float)vec_ld(0, bp));
    b40 = ((vector float)vec_ld(16*4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0, bp));
    b50 = ((vector float)vec_ld(16*4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0, bp));
    b60 = ((vector float)vec_ld(16*4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0, bp));
    b70 = ((vector float)vec_ld(16*4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));

#undef MERGE_S16
    /* }}} */
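
    /* The transpose above is three rounds of vec_mergeh/vec_mergel:
     * rows i and i+4 are interleaved, then the intermediate results
     * are interleaved again, all in registers while the data is still
     * 16-bit. */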

    /* Some of the initial calculations can be done as vector short
     * before conversion to vector float. The following code section
     * takes advantage of this. */

    /* fdct rows {{{ */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));

#define CTF0(n)                                           \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0);                  \
    b##n##0 = vec_ctf(vs32(b##n##0), 0)

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0   = vec_madd(cnst, x0, mzero);
    x1   = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20  = vec_madd(cnst, b20, x0);
    b21  = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60  = vec_madd(cnst, b60, x0);
    b61  = vec_madd(cnst, b61, x1);

#define CTFX(x,b)                                \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0);               \
    b##1 = vec_ctf(vs32(b##1), 0)

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX

    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8   = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0   = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1   = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2   = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3   = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70  = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50  = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30  = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10  = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);

    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8   = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0   = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1   = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2   = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3   = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71  = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51  = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31  = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11  = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */
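
    /* The row pass above runs every butterfly step twice: each
     * eight-lane short vector was unpacked into two four-lane float
     * vectors, so four of the eight 1-D transforms travel in the *0
     * registers and four in the *1 registers. */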

    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */
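
    /* A second transpose, now in the float domain, restores column
     * order so the same butterfly network can run down the columns. */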

    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);

    /* round, convert back to short {{{ */
#define CTS(n)                                                     \
    b##n##0 = vec_round(b##n##0);                                  \
    b##n##1 = vec_round(b##n##1);                                  \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0));                 \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0));                 \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp)

    bp = (vector signed short*)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */
}

/* vim:set foldmethod=marker foldlevel=0: */