/*
 * FFT/MDCT transform with SSE optimizations
 * Copyright (c) 2008 Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
DECLARE_ALIGNED(16, static const int, m1m1m1m1[4]) =
    { 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
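/* Four copies of the float sign bit: xorps against this constant negates four
 * packed floats at once (used for the mirrored quarter in ff_imdct_calc_sse). */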

void ff_fft_dispatch_sse(FFTComplex *z, int nbits);
void ff_fft_dispatch_interleave_sse(FFTComplex *z, int nbits);

void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
{
    int n = 1 << s->nbits;

    ff_fft_dispatch_interleave_sse(z, s->nbits);

    if(n <= 16) {
        x86_reg i = -8*n;
        __asm__ volatile(
            "1: \n"
            "movaps     (%0,%1), %%xmm0 \n"
            "movaps      %%xmm0, %%xmm1 \n"
            "unpcklps 16(%0,%1), %%xmm0 \n"
            "unpckhps 16(%0,%1), %%xmm1 \n"
            "movaps      %%xmm0,   (%0,%1) \n"
            "movaps      %%xmm1, 16(%0,%1) \n"
            "add $32, %0 \n"
            "jl 1b \n"
            :"+r"(i)
            :"r"(z+n)
            :"memory"
        );
    }
}

void ff_fft_permute_sse(FFTContext *s, FFTComplex *z)
{
    int n = 1 << s->nbits;
    int i;
    for(i = 0; i < n; i += 2) {
        __asm__ volatile(
            "movaps %2, %%xmm0 \n"
            "movlps %%xmm0, %0 \n"
            "movhps %%xmm0, %1 \n"
            :"=m"(s->tmp_buf[s->revtab[i]]),
             "=m"(s->tmp_buf[s->revtab[i+1]])
            :"m"(z[i])
        );
    }
    memcpy(z, s->tmp_buf, n*sizeof(FFTComplex));
}
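
/* Scalar equivalent of the loop above (illustration only): every complex value
 * is scattered to its bit-reversed slot in tmp_buf, then the whole buffer is
 * copied back over z. */
#if 0
static void fft_permute_ref(FFTContext *s, FFTComplex *z)
{
    int i, n = 1 << s->nbits;
    for (i = 0; i < n; i++)
        s->tmp_buf[s->revtab[i]] = z[i];
    memcpy(z, s->tmp_buf, n * sizeof(FFTComplex));
}
#endif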

void ff_imdct_half_sse(MDCTContext *s, FFTSample *output, const FFTSample *input)
{
    av_unused x86_reg i, j, k, l;
    long n = 1 << s->nbits;
    long n2 = n >> 1;
    long n4 = n >> 2;
    long n8 = n >> 3;
    const uint16_t *revtab = s->fft.revtab + n8;
    const FFTSample *tcos = s->tcos;
    const FFTSample *tsin = s->tsin;
    FFTComplex *z = (FFTComplex *)output;

    /* pre rotation */
    for(k = n8-2; k >= 0; k -= 2) {
        __asm__ volatile(
88 "movaps (%2,%1,2), %%xmm0 \n" // { z[k].re, z[k].im, z[k+1].re, z[k+1].im }
89 "movaps -16(%2,%0,2), %%xmm1 \n" // { z[-k-2].re, z[-k-2].im, z[-k-1].re, z[-k-1].im }
90 "movaps %%xmm0, %%xmm2 \n"
91 "shufps $0x88, %%xmm1, %%xmm0 \n" // { z[k].re, z[k+1].re, z[-k-2].re, z[-k-1].re }
92 "shufps $0x77, %%xmm2, %%xmm1 \n" // { z[-k-1].im, z[-k-2].im, z[k+1].im, z[k].im }
93 "movlps (%3,%1), %%xmm4 \n"
94 "movlps (%4,%1), %%xmm5 \n"
95 "movhps -8(%3,%0), %%xmm4 \n" // { cos[k], cos[k+1], cos[-k-2], cos[-k-1] }
96 "movhps -8(%4,%0), %%xmm5 \n" // { sin[k], sin[k+1], sin[-k-2], sin[-k-1] }
97 "movaps %%xmm0, %%xmm2 \n"
98 "movaps %%xmm1, %%xmm3 \n"
99 "mulps %%xmm5, %%xmm0 \n" // re*sin
100 "mulps %%xmm4, %%xmm1 \n" // im*cos
101 "mulps %%xmm4, %%xmm2 \n" // re*cos
102 "mulps %%xmm5, %%xmm3 \n" // im*sin
103 "subps %%xmm0, %%xmm1 \n" // -> re
104 "addps %%xmm3, %%xmm2 \n" // -> im
105 "movaps %%xmm1, %%xmm0 \n"
106 "unpcklps %%xmm2, %%xmm1 \n" // { z[k], z[k+1] }
107 "unpckhps %%xmm2, %%xmm0 \n" // { z[-k-2], z[-k-1] }
108 ::"r"(-4*k
), "r"(4*k
),
109 "r"(input
+n4
), "r"(tcos
+n8
), "r"(tsin
+n8
)
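        /* The twiddled values are left in xmm0/xmm1 and consumed by the
         * separate store statements below; the compiler is not told about
         * this, so nothing may touch those registers in between. */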
#ifdef ARCH_X86_64
        // if we have enough regs, don't let gcc make the luts latency-bound
        // but if not, latency is faster than spilling
        __asm__("movlps %%xmm0, %0 \n"
                "movhps %%xmm0, %1 \n"
                "movlps %%xmm1, %2 \n"
                "movhps %%xmm1, %3 \n"
            :"=m"(z[revtab[-k-2]]),
             "=m"(z[revtab[-k-1]]),
             "=m"(z[revtab[ k  ]]),
             "=m"(z[revtab[ k+1]])
        );
#else
        __asm__("movlps %%xmm0, %0" :"=m"(z[revtab[-k-2]]));
        __asm__("movhps %%xmm0, %0" :"=m"(z[revtab[-k-1]]));
        __asm__("movlps %%xmm1, %0" :"=m"(z[revtab[ k  ]]));
        __asm__("movhps %%xmm1, %0" :"=m"(z[revtab[ k+1]]));
#endif
    }
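    /* Each pass above twiddles two complex pairs taken from opposite ends of
     * the input (re' = im*cos - re*sin, im' = re*cos + im*sin, as annotated)
     * and scatters the four results to their bit-reversed positions, so
     * ff_fft_dispatch_sse can run on z directly, without a separate permute. */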

    ff_fft_dispatch_sse(z, s->fft.nbits);

    /* post rotation + reinterleave + reorder */

#define CMUL(j,xmm0,xmm1)\
        "movaps   (%2,"#j",2), %%xmm6 \n"\
        "movaps 16(%2,"#j",2), "#xmm0"\n"\
        "movaps        %%xmm6, "#xmm1"\n"\
        "movaps       "#xmm0", %%xmm7 \n"\
        "mulps      (%3,"#j"), %%xmm6 \n"\
        "mulps      (%4,"#j"), "#xmm0"\n"\
        "mulps      (%4,"#j"), "#xmm1"\n"\
        "mulps      (%3,"#j"), %%xmm7 \n"\
        "subps         %%xmm6, "#xmm0"\n"\
        "addps         %%xmm7, "#xmm1"\n"

    j = -n2;
    k = n2 - 16;
    __asm__ volatile(
        "1: \n"
        CMUL(%0, %%xmm0, %%xmm1)
        CMUL(%1, %%xmm4, %%xmm5)
        "shufps    $0x1b, %%xmm1, %%xmm1 \n"
        "shufps    $0x1b, %%xmm5, %%xmm5 \n"
        "movaps   %%xmm4, %%xmm6 \n"
        "unpckhps %%xmm1, %%xmm4 \n"
        "unpcklps %%xmm1, %%xmm6 \n"
        "movaps   %%xmm0, %%xmm2 \n"
        "unpcklps %%xmm5, %%xmm0 \n"
        "unpckhps %%xmm5, %%xmm2 \n"
        "movaps   %%xmm6,   (%2,%1,2) \n"
        "movaps   %%xmm4, 16(%2,%1,2) \n"
        "movaps   %%xmm0,   (%2,%0,2) \n"
        "movaps   %%xmm2, 16(%2,%0,2) \n"
        "sub $16, %1 \n"
        "add $16, %0 \n"
        "jl 1b \n"
        :"+r"(j), "+r"(k)
        :"r"(z+n8), "r"(tcos+n8), "r"(tsin+n8)
        :"memory"
    );
}

void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output, const FFTSample *input)
{
    x86_reg j, k;
    long n = 1 << s->nbits;
    long n4 = n >> 2;

    ff_imdct_half_sse(s, output+n4, input);
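
    /* output[n4..n4+n/2) now holds the half transform; mirror it into the
     * outer quarters: output[k] = -output[n/2-k-1] and output[n-k-1] =
     * output[n/2+k] for 0 <= k < n4, four floats at a time. */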
185 "movaps %4, %%xmm7 \n"
187 "movaps (%2,%1), %%xmm0 \n"
188 "movaps (%3,%0), %%xmm1 \n"
189 "shufps $0x1b, %%xmm0, %%xmm0 \n"
190 "shufps $0x1b, %%xmm1, %%xmm1 \n"
191 "xorps %%xmm7, %%xmm0 \n"
192 "movaps %%xmm1, (%3,%1) \n"
193 "movaps %%xmm0, (%2,%0) \n"
198 :"r"(output
+n4
), "r"(output
+n4
*3),