/*
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 * Based on code Copyright (c) 2002 Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavcodec/dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_ppc.h"
#include "util_altivec.h"
/*
  These three macros are from libavcodec/fft.c
  and are required for the reference C code.
*/
#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) {\
    FFTSample ax = qre1, ay = qim1, bx = pre1, by = pim1;\
    pre = bx + ax; pim = by + ay; qre = bx - ax; qim = by - ay;\
}
#define MUL16(a,b) ((a) * (b))

#define CMUL(pre, pim, are, aim, bre, bim) {\
    pre = (MUL16(are, bre) - MUL16(aim, bim));\
    pim = (MUL16(are, bim) + MUL16(bre, aim));\
}
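
/*
 * Illustration only (not part of the original fft.c; the names tre/tim,
 * wre/wim, p and q are made up for this sketch): a single butterfly of the
 * reference C code would combine the two macros roughly like this, with
 * (wre, wim) being the twiddle factor applied to q:
 *
 *     FFTSample tre, tim;
 *     CMUL(tre, tim, wre, wim, q->re, q->im);   // t = w * q
 *     BF(p->re, p->im, q->re, q->im,
 *        p->re, p->im, tre, tim);               // p = p + t, q = p - t
 */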
/**
 * Do a complex FFT with the parameters defined in ff_fft_init(). The
 * input data must be permuted beforehand using the s->revtab table.
 * No 1.0/sqrt(n) normalization is done.
 * This code assumes that the 'z' pointer is 16-byte aligned and that
 * every FFTComplex is an 8-byte aligned pair of floats.
 * The code is exactly the same as the SSE version, except that
 * successive MUL + ADD/SUB pairs have been merged into fused
 * multiply-adds ('vec_madd' in AltiVec).
 * A caller-side usage sketch appears after the function body below.
 */
void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
{
POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
    register const vector float vczero = (const vector float)vec_splat_u32(0);
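    /* vczero is an all-zero vector; it is used below as the accumulator of
       the first vec_madd in the complex-multiply loop, so that the FMA acts
       as a plain multiply. */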
    register FFTComplex *p, *q;
    FFTComplex *cptr, *cptr1;
POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
        vector float *r, a, b, a1, c1, c2;
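
        /* Each 16-byte vector register holds two FFTComplex values (four
           floats); the pointer cast below relies on the 16-byte alignment
           of z stated in the documentation above. */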
        r = (vector float *)&z[0];
            a  = vec_ld(0, r);
            a1 = vec_ld(sizeof(vector float), r);
            b = vec_perm(a, a, vcprmle(1,0,3,2));
            a = vec_madd(a, c1, b);
            /* do the pass 0 butterfly */
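            /* The permute above swaps the two complex values within the
               register, and the fused multiply-add with the +1/+1/-1/-1
               sign pattern held in c1 then produces their sum in the low
               half and their difference in the high half: a radix-2
               butterfly in one instruction, the MUL + ADD/SUB merge
               described in the header comment. */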
            b = vec_perm(a1, a1, vcprmle(1,0,3,2));
            b = vec_madd(a1, c1, b);
            /* do the pass 0 butterfly */
            /* multiply third by -i */
            b = vec_perm(b, b, vcprmle(2,3,1,0));
            /* do the pass 1 butterfly */
            vec_st(vec_madd(b, c2, a), 0, r);
            vec_st(vec_nmsub(b, c2, a), sizeof(vector float), r);
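            /* vec_madd(b,c2,a) computes a + c2*b and vec_nmsub(b,c2,a)
               computes a - c2*b, so the two stores write the pass 1 sum and
               difference back to r[0] and r[1]. The sign pattern in c2 (set
               up earlier from s->inverse) also folds in the multiplication
               by -i, or +i for the inverse transform, that the permute
               above prepared. */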
                vector float a, b, c, t1;
                a = vec_ld(0, (float*)p);
                b = vec_ld(0, (float*)q);
                c = vec_ld(0, (float*)cptr);
                t1 = vec_madd(c, vec_perm(b, b, vcprmle(2,2,0,0)), vczero);
                c = vec_ld(sizeof(vector float), (float*)cptr);
                b = vec_madd(c, vec_perm(b, b, vcprmle(3,3,1,1)), t1);
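                /* The two FMAs above implement the CMUL() complex multiply
                   for both complex values at once: the first multiplies the
                   twiddle words by the duplicated real parts of the q data
                   (accumulating into the zero vector vczero), the second
                   multiplies the companion twiddle words by the duplicated
                   imaginary parts and adds the partial products. This
                   assumes the twiddle table behind cptr was built with its
                   factors pre-arranged for exactly this pairing. */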
                vec_st(vec_add(a, b), 0, (float*)p);
                vec_st(vec_sub(a, b), 0, (float*)q);
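                /* Butterfly: p receives p + w*q and q receives p - w*q,
                   i.e. the BF() step of the reference C code. */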
        nblocks = nblocks >> 1;
        nloops  = nloops << 1;
    } while (nblocks != 0);
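    /* Each iteration of the outer loop is one FFT pass: the number of
       blocks is halved and the butterfly span (nloops) doubles until
       nblocks reaches zero, covering the passes that remain after the
       combined pass 0/1 stage above. */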
POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
}
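
/*
 * Caller-side usage sketch (illustrative only; it assumes the FFT API of
 * this FFmpeg version, and error checking is omitted):
 *
 *     FFTContext ctx;
 *     FFTComplex buf[1 << 5];             // must be 16-byte aligned here
 *     ff_fft_init(&ctx, 5, 0);            // 2^5-point forward transform
 *     ff_fft_permute(&ctx, buf);          // apply the s->revtab permutation
 *     ff_fft_calc_altivec(&ctx, buf);     // in-place FFT, no 1/sqrt(n) scaling
 *     ff_fft_end(&ctx);
 */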