;******************************************************************************
;* x86-SIMD-optimized IDCT for prores
;* this is identical to "simple" IDCT written by Michael Niedermayer
;* except for the clip range
;*
;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
; add SECTION_RODATA and proper include before including this file!
29 %macro define_constants
1
38 %define w4_plus_w2 w4_plus_w2
%1
39 %define w4_min_w2 w4_min_w2
%1
40 %define w4_plus_w6 w4_plus_w6
%1
41 %define w4_min_w6 w4_min_w6
%1
42 %define w1_plus_w3 w1_plus_w3
%1
43 %define w3_min_w1 w3_min_w1
%1
44 %define w7_plus_w3 w7_plus_w3
%1
45 %define w3_min_w7 w3_min_w7
%1
48 ; interleave data while maintaining source
49 ; %1=type, %2=dstlo, %3=dsthi, %4=src, %5=interleave
51 punpckl
%1 m
%2, m
%4, m
%5
52 punpckh
%1 m
%3, m
%4, m
%5
55 ; %1/%2=src1/dst1, %3/%4=dst2, %5/%6=src2, %7=shift
56 ; action: %3/%4 = %1/%2 - %5/%6; %1/%2 += %5/%6
57 ; %1/%2/%3/%4 >>= %7; dword -> word (in %1/%3)
59 psubd
%3, %1, %5 ; { a0 - b0 }[0-3]
60 psubd
%4, %2, %6 ; { a0 - b0 }[4-7]
61 paddd
%1, %5 ; { a0 + b0 }[0-3]
62 paddd
%2, %6 ; { a0 + b0 }[4-7]
67 packssdw
%1, %2 ; row[0]
68 packssdw
%3, %4 ; row[7]
71 ; %1 = initial bias ("" if nop)
72 ; %2 = number of bits to shift at the end
73 ; %3 = qmat (for prores)
75 ; a0 = (W4 * row[0]) + (1 << (15 - 1));
84 mova m15
, [pd_round_
%+ %2]
88 SBUTTERFLY3 wd
, 0, 1, 10, 8 ; { row[0], row[2] }[0-3]/[4-7]
89 pmaddwd m2
, m0
, [w4_plus_w6
]
90 pmaddwd m3
, m1
, [w4_plus_w6
]
91 pmaddwd m4
, m0
, [w4_min_w6
]
92 pmaddwd m5
, m1
, [w4_min_w6
]
93 pmaddwd m6
, m0
, [w4_min_w2
]
94 pmaddwd m7
, m1
, [w4_min_w2
]
95 pmaddwd m0
, [w4_plus_w2
]
96 pmaddwd m1
, [w4_plus_w2
]
98 ; Adding 1<<(%2-1) for >=15 bits values
109 ; a0: -1*row[0]-1*row[2]
112 ; a3: -1*row[0]+1*row[2]
114 ; a0 += W4*row[4] + W6*row[6]; i.e. -1*row[4]
115 ; a1 -= W4*row[4] + W2*row[6]; i.e. -1*row[4]-1*row[6]
116 ; a2 -= W4*row[4] - W2*row[6]; i.e. -1*row[4]+1*row[6]
117 ; a3 += W4*row[4] - W6*row[6]; i.e. -1*row[4]
118 SBUTTERFLY3 wd
, 8, 9, 13, 12 ; { row[4], row[6] }[0-3]/[4-7]
119 pmaddwd m10
, m8
, [w4_plus_w6
]
120 pmaddwd m11
, m9
, [w4_plus_w6
]
121 paddd m0
, m10
; a0[0-3]
122 paddd m1
, m11
; a0[4-7]
123 pmaddwd m10
, m8
, [w4_min_w6
]
124 pmaddwd m11
, m9
, [w4_min_w6
]
125 paddd m6
, m10
; a3[0-3]
126 paddd m7
, m11
; a3[4-7]
127 pmaddwd m10
, m8
, [w4_min_w2
]
128 pmaddwd m11
, m9
, [w4_min_w2
]
129 pmaddwd m8
, [w4_plus_w2
]
130 pmaddwd m9
, [w4_plus_w2
]
131 psubd m4
, m10
; a2[0-3] intermediate
132 psubd m5
, m11
; a2[4-7] intermediate
133 psubd m2
, m8
; a1[0-3] intermediate
134 psubd m3
, m9
; a1[4-7] intermediate
138 mova
[blockq
+ 32], m2
139 mova
[blockq
+ 64], m4
140 mova
[blockq
+ 96], m6
141 mova m10
,[blockq
+ 16] ; { row[1] }[0-7]
142 mova m8
, [blockq
+ 48] ; { row[3] }[0-7]
143 mova m13
,[blockq
+ 80] ; { row[5] }[0-7]
144 mova m14
,[blockq
+112] ; { row[7] }[0-7]
145 mova
[blockq
+ 16], m1
146 mova
[blockq
+ 48], m3
147 mova
[blockq
+ 80], m5
148 mova
[blockq
+112], m7
156 ; b0 = MUL(W1, row[1]);
157 ; MAC(b0, W3, row[3]);
158 ; b1 = MUL(W3, row[1]);
159 ; MAC(b1, -W7, row[3]);
160 ; b2 = MUL(W5, row[1]);
161 ; MAC(b2, -W1, row[3]);
162 ; b3 = MUL(W7, row[1]);
163 ; MAC(b3, -W5, row[3]);
164 SBUTTERFLY3 wd
, 0, 1, 10, 8 ; { row[1], row[3] }[0-3]/[4-7]
165 pmaddwd m2
, m0
, [w3_min_w7
]
166 pmaddwd m3
, m1
, [w3_min_w7
]
167 pmaddwd m4
, m0
, [w5_min_w1
]
168 pmaddwd m5
, m1
, [w5_min_w1
]
169 pmaddwd m6
, m0
, [w7_min_w5
]
170 pmaddwd m7
, m1
, [w7_min_w5
]
171 pmaddwd m0
, [w1_plus_w3
]
172 pmaddwd m1
, [w1_plus_w3
]
174 ; b0: +1*row[1]+2*row[3]
175 ; b1: +2*row[1]-1*row[3]
176 ; b2: -1*row[1]-1*row[3]
177 ; b3: +1*row[1]+1*row[3]
179 ; MAC(b0, W5, row[5]);
180 ; MAC(b0, W7, row[7]);
181 ; MAC(b1, -W1, row[5]);
182 ; MAC(b1, -W5, row[7]);
183 ; MAC(b2, W7, row[5]);
184 ; MAC(b2, W3, row[7]);
185 ; MAC(b3, W3, row[5]);
186 ; MAC(b3, -W1, row[7]);
187 SBUTTERFLY3 wd
, 8, 9, 13, 14 ; { row[5], row[7] }[0-3]/[4-7]
189 ; b0: -1*row[5]+1*row[7]
190 ; b1: -1*row[5]+1*row[7]
191 ; b2: +1*row[5]+2*row[7]
192 ; b3: +2*row[5]-1*row[7]
194 pmaddwd m10
, m8
, [w1_plus_w5
]
195 pmaddwd m11
, m9
, [w1_plus_w5
]
196 pmaddwd m12
, m8
, [w5_plus_w7
]
197 pmaddwd m13
, m9
, [w5_plus_w7
]
198 psubd m2
, m10
; b1[0-3]
199 psubd m3
, m11
; b1[4-7]
200 paddd m0
, m12
; b0[0-3]
201 paddd m1
, m13
; b0[4-7]
202 pmaddwd m12
, m8
, [w7_plus_w3
]
203 pmaddwd m13
, m9
, [w7_plus_w3
]
204 pmaddwd m8
, [w3_min_w1
]
205 pmaddwd m9
, [w3_min_w1
]
206 paddd m4
, m12
; b2[0-3]
207 paddd m5
, m13
; b2[4-7]
208 paddd m6
, m8
; b3[0-3]
209 paddd m7
, m9
; b3[4-7]
211 ; row[0] = (a0 + b0) >> 15;
212 ; row[7] = (a0 - b0) >> 15;
213 ; row[1] = (a1 + b1) >> 15;
214 ; row[6] = (a1 - b1) >> 15;
215 ; row[2] = (a2 + b2) >> 15;
216 ; row[5] = (a2 - b2) >> 15;
217 ; row[3] = (a3 + b3) >> 15;
218 ; row[4] = (a3 - b3) >> 15;
219 mova m8
, [blockq
+ 0] ; a0[0-3]
220 mova m9
, [blockq
+16] ; a0[4-7]
221 SUMSUB_SHPK m8
, m9
, m10
, m11
, m0
, m1
, %2
222 mova m0
, [blockq
+32] ; a1[0-3]
223 mova m1
, [blockq
+48] ; a1[4-7]
224 SUMSUB_SHPK m0
, m1
, m9
, m11
, m2
, m3
, %2
225 mova m1
, [blockq
+64] ; a2[0-3]
226 mova m2
, [blockq
+80] ; a2[4-7]
227 SUMSUB_SHPK m1
, m2
, m11
, m3
, m4
, m5
, %2
228 mova m2
, [blockq
+96] ; a3[0-3]
229 mova m3
, [blockq
+112] ; a3[4-7]
230 SUMSUB_SHPK m2
, m3
, m4
, m5
, m6
, m7
, %2
233 ; void ff_prores_idct_put_10_<opt>(uint8_t *pixels, ptrdiff_t stride,
234 ; int16_t *block, const int16_t *qmat);
237 ; %2 = row bias macro
239 ; %4 = column bias macro
240 ; %5 = final action (nothing, "store", "put", "add")
241 ; %6 = min pixel value
242 ; %7 = max pixel value
243 ; %8 = qmat (for prores)
246 ; for (i = 0; i < 8; i++)
247 ; idctRowCondDC(block + i*8);
248 mova m10
,[blockq
+ 0] ; { row[0] }[0-7]
249 mova m8
, [blockq
+32] ; { row[2] }[0-7]
250 mova m13
,[blockq
+64] ; { row[4] }[0-7]
251 mova m12
,[blockq
+96] ; { row[6] }[0-7]
261 ; This copies the DC-only shortcut. When there is only a DC coefficient the
262 ; C shifts the value and splats it to all coeffs rather than multiplying and
263 ; doing the full IDCT. This causes a difference on 8-bit because the
264 ; coefficient is 16383 rather than 16384 (which you can get with shifting).
267 por m1
, [blockq
+ 16] ; { row[1] }[0-7]
268 por m1
, [blockq
+ 48] ; { row[3] }[0-7]
269 por m1
, [blockq
+ 80] ; { row[5] }[0-7]
270 por m1
, [blockq
+112] ; { row[7] }[0-7]
304 ; transpose for second part of IDCT
305 TRANSPOSE8x8W
8, 0, 1, 2, 4, 11, 9, 10, 3
306 mova
[blockq
+ 16], m0
307 mova
[blockq
+ 48], m2
308 mova
[blockq
+ 80], m11
309 mova
[blockq
+112], m10
315 ; for (i = 0; i < 8; i++)
316 ; idctSparseColAdd(dest + i, line_size, block + i);
322 ; No clamping, means pure idct
324 mova
[blockq
+ 16], m0
325 mova
[blockq
+ 32], m1
326 mova
[blockq
+ 48], m2
327 mova
[blockq
+ 64], m4
328 mova
[blockq
+ 80], m11
329 mova
[blockq
+ 96], m9
330 mova
[blockq
+112], m10