Rename function
[FFMpeg-mirror/DVCPRO-HD.git] / libavcodec / sh4 / qpel.c
bloba75d22f6ce75faf5138110acc6d944bd600ada89
1 /*
2 * This is optimized for sh, which have post increment addressing (*p++).
3 * Some CPU may be index (p[n]) faster than post increment (*p++).
5 * copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
7 * This file is part of FFmpeg.
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
/*
 * PIXOP2(OPNAME, OP): expands to the family of two-source ("l2") and
 * four-source ("l4") pixel-averaging helpers used by the motion
 * compensation code below.  OPNAME is the function-name prefix
 * (put/avg); OP either stores or averages the result into dst.
 * "aligned" variants require 32-bit aligned pointers (LP loads);
 * "aligned2"/"aligned0" allow one unaligned source (AV_RN32 loads).
 * The fully-unaligned l2 variants below are disabled (commented out).
 */
24 #define PIXOP2(OPNAME, OP) \
25 /*static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
27 do {\
28 OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
29 OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),AV_RN32(src2+4)) ); \
30 src1+=src_stride1; \
31 src2+=src_stride2; \
32 dst+=dst_stride; \
33 } while(--h); \
36 static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
38 do {\
39 OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
40 OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),AV_RN32(src2+4)) ); \
41 src1+=src_stride1; \
42 src2+=src_stride2; \
43 dst+=dst_stride; \
44 } while(--h); \
47 static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
49 do {\
50 OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
51 src1+=src_stride1; \
52 src2+=src_stride2; \
53 dst+=dst_stride; \
54 } while(--h); \
57 static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
59 do {\
60 OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
61 OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),AV_RN32(src2+4)) ); \
62 OP(LP(dst+8),no_rnd_avg32(AV_RN32(src1+8),AV_RN32(src2+8)) ); \
63 OP(LP(dst+12),no_rnd_avg32(AV_RN32(src1+12),AV_RN32(src2+12)) ); \
64 src1+=src_stride1; \
65 src2+=src_stride2; \
66 dst+=dst_stride; \
67 } while(--h); \
70 static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
72 do {\
73 OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
74 OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),AV_RN32(src2+4)) ); \
75 OP(LP(dst+8),rnd_avg32(AV_RN32(src1+8),AV_RN32(src2+8)) ); \
76 OP(LP(dst+12),rnd_avg32(AV_RN32(src1+12),AV_RN32(src2+12)) ); \
77 src1+=src_stride1; \
78 src2+=src_stride2; \
79 dst+=dst_stride; \
80 } while(--h); \
81 }*/\
/* 4-pixel-wide two-source rounding average, h rows.                        \
 * _aligned: both sources 32-bit aligned; _aligned2: only src2 aligned.    */ \
83 static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
85 do {\
86 OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
87 src1+=src_stride1; \
88 src2+=src_stride2; \
89 dst+=dst_stride; \
90 } while(--h); \
93 static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
95 do {\
96 OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
97 src1+=src_stride1; \
98 src2+=src_stride2; \
99 dst+=dst_stride; \
100 } while(--h); \
/* 16-pixel-wide two-source average (no-rounding and rounding variants):   \
 * src1 may be unaligned (AV_RN32 load), src2 must be 32-bit aligned.     */ \
103 static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
105 do {\
106 OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
107 OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LP(src2+4)) ); \
108 OP(LP(dst+8),no_rnd_avg32(AV_RN32(src1+8),LP(src2+8)) ); \
109 OP(LP(dst+12),no_rnd_avg32(AV_RN32(src1+12),LP(src2+12)) ); \
110 src1+=src_stride1; \
111 src2+=src_stride2; \
112 dst+=dst_stride; \
113 } while(--h); \
116 static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
118 do {\
119 OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
120 OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LP(src2+4)) ); \
121 OP(LP(dst+8),rnd_avg32(AV_RN32(src1+8),LP(src2+8)) ); \
122 OP(LP(dst+12),rnd_avg32(AV_RN32(src1+12),LP(src2+12)) ); \
123 src1+=src_stride1; \
124 src2+=src_stride2; \
125 dst+=dst_stride; \
126 } while(--h); \
/* 8-pixel-wide two-source average: src1 may be unaligned, src2 aligned. */ \
129 static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
131 do { /* only src2 aligned */\
132 OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
133 OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LP(src2+4)) ); \
134 src1+=src_stride1; \
135 src2+=src_stride2; \
136 dst+=dst_stride; \
137 } while(--h); \
140 static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
142 do {\
143 OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
144 OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LP(src2+4)) ); \
145 src1+=src_stride1; \
146 src2+=src_stride2; \
147 dst+=dst_stride; \
148 } while(--h); \
/* 8-pixel-wide two-source average: both sources 32-bit aligned (LP). */ \
151 static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
153 do {\
154 OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
155 OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
156 src1+=src_stride1; \
157 src2+=src_stride2; \
158 dst+=dst_stride; \
159 } while(--h); \
162 static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
164 do {\
165 OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
166 OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
167 src1+=src_stride1; \
168 src2+=src_stride2; \
169 dst+=dst_stride; \
170 } while(--h); \
/* 16-pixel-wide two-source average: both sources 32-bit aligned (LP). */ \
173 static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
175 do {\
176 OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
177 OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
178 OP(LP(dst+8),no_rnd_avg32(LP(src1+8),LP(src2+8)) ); \
179 OP(LP(dst+12),no_rnd_avg32(LP(src1+12),LP(src2+12)) ); \
180 src1+=src_stride1; \
181 src2+=src_stride2; \
182 dst+=dst_stride; \
183 } while(--h); \
186 static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
188 do {\
189 OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
190 OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
191 OP(LP(dst+8),rnd_avg32(LP(src1+8),LP(src2+8)) ); \
192 OP(LP(dst+12),rnd_avg32(LP(src1+12),LP(src2+12)) ); \
193 src1+=src_stride1; \
194 src2+=src_stride2; \
195 dst+=dst_stride; \
196 } while(--h); \
/* _aligned1 variants (src1 aligned, src2 unaligned) are implemented by     \
 * swapping the source operands and strides of the _aligned2 variants.     */ \
199 static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
200 { OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
202 static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
203 { OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
205 static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
206 { OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
208 static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
209 { OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
/* 8-pixel-wide FOUR-source average, one 32-bit word (4 pixels) at a time:  \
 * UNPACK splits two packed words into sum/carry halves, *_PACK recombines  \
 * all four sources into the rounded (or truncated) average.  All four      \
 * source pointers must be 32-bit aligned here.                            */ \
211 static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
212 do { \
213 uint32_t a0,a1,a2,a3; \
214 UNPACK(a0,a1,LP(src1),LP(src2)); \
215 UNPACK(a2,a3,LP(src3),LP(src4)); \
216 OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
217 UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
218 UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
219 OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
220 src1+=src_stride1;\
221 src2+=src_stride2;\
222 src3+=src_stride3;\
223 src4+=src_stride4;\
224 dst+=dst_stride;\
225 } while(--h); \
228 static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
229 do { \
230 uint32_t a0,a1,a2,a3; \
231 UNPACK(a0,a1,LP(src1),LP(src2)); \
232 UNPACK(a2,a3,LP(src3),LP(src4)); \
233 OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
234 UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
235 UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
236 OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
237 src1+=src_stride1;\
238 src2+=src_stride2;\
239 src3+=src_stride3;\
240 src4+=src_stride4;\
241 dst+=dst_stride;\
242 } while(--h); \
/* 8-pixel-wide four-source average, _aligned0: src1 may be unaligned       \
 * (AV_RN32), src2..src4 must be 32-bit aligned.                           */ \
245 static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
246 do { \
247 uint32_t a0,a1,a2,a3; /* src1 only not aligned */\
248 UNPACK(a0,a1,AV_RN32(src1),LP(src2)); \
249 UNPACK(a2,a3,LP(src3),LP(src4)); \
250 OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
251 UNPACK(a0,a1,AV_RN32(src1+4),LP(src2+4)); \
252 UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
253 OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
254 src1+=src_stride1;\
255 src2+=src_stride2;\
256 src3+=src_stride3;\
257 src4+=src_stride4;\
258 dst+=dst_stride;\
259 } while(--h); \
262 static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
263 do { \
264 uint32_t a0,a1,a2,a3; \
265 UNPACK(a0,a1,AV_RN32(src1),LP(src2)); \
266 UNPACK(a2,a3,LP(src3),LP(src4)); \
267 OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
268 UNPACK(a0,a1,AV_RN32(src1+4),LP(src2+4)); \
269 UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
270 OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
271 src1+=src_stride1;\
272 src2+=src_stride2;\
273 src3+=src_stride3;\
274 src4+=src_stride4;\
275 dst+=dst_stride;\
276 } while(--h); \
279 static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
280 do { \
281 uint32_t a0,a1,a2,a3; \
282 UNPACK(a0,a1,LP(src1),LP(src2)); \
283 UNPACK(a2,a3,LP(src3),LP(src4)); \
284 OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
285 UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
286 UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
287 OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
288 UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
289 UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
290 OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
291 UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
292 UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
293 OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
294 src1+=src_stride1;\
295 src2+=src_stride2;\
296 src3+=src_stride3;\
297 src4+=src_stride4;\
298 dst+=dst_stride;\
299 } while(--h); \
/* 16-pixel-wide four-source truncating (no-rounding) average; all four     \
 * sources aligned.  Stores the four words at dst, dst+4, dst+8, dst+12.   */ \
302 static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
303 do { \
304 uint32_t a0,a1,a2,a3; \
305 UNPACK(a0,a1,LP(src1),LP(src2)); \
306 UNPACK(a2,a3,LP(src3),LP(src4)); \
307 OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
308 UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
309 UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
310 OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
311 UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
312 UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
313 OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
314 UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
315 UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
316 OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
317 src1+=src_stride1;\
318 src2+=src_stride2;\
319 src3+=src_stride3;\
320 src4+=src_stride4;\
321 dst+=dst_stride;\
322 } while(--h); \
325 static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
326 do { /* src1 is unaligned */\
327 uint32_t a0,a1,a2,a3; \
328 UNPACK(a0,a1,AV_RN32(src1),LP(src2)); \
329 UNPACK(a2,a3,LP(src3),LP(src4)); \
330 OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
331 UNPACK(a0,a1,AV_RN32(src1+4),LP(src2+4)); \
332 UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
333 OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
334 UNPACK(a0,a1,AV_RN32(src1+8),LP(src2+8)); \
335 UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
336 OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
337 UNPACK(a0,a1,AV_RN32(src1+12),LP(src2+12)); \
338 UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
339 OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
340 src1+=src_stride1;\
341 src2+=src_stride2;\
342 src3+=src_stride3;\
343 src4+=src_stride4;\
344 dst+=dst_stride;\
345 } while(--h); \
/* 16-pixel-wide four-source truncating average, _aligned0: src1 may be     \
 * unaligned (AV_RN32); src2..src4 aligned.  Words go to dst..dst+12.      */ \
348 static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
349 do { \
350 uint32_t a0,a1,a2,a3; \
351 UNPACK(a0,a1,AV_RN32(src1),LP(src2)); \
352 UNPACK(a2,a3,LP(src3),LP(src4)); \
353 OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
354 UNPACK(a0,a1,AV_RN32(src1+4),LP(src2+4)); \
355 UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
356 OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
357 UNPACK(a0,a1,AV_RN32(src1+8),LP(src2+8)); \
358 UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
359 OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
360 UNPACK(a0,a1,AV_RN32(src1+12),LP(src2+12)); \
361 UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
362 OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
363 src1+=src_stride1;\
364 src2+=src_stride2;\
365 src3+=src_stride3;\
366 src4+=src_stride4;\
367 dst+=dst_stride;\
368 } while(--h); \
/* Instantiate the PIXOP2 family twice: avg_* (read-modify-write average
 * into dst) and put_* (plain store).  The op_* macros are undefined again
 * immediately so they cannot leak into later code. */
372 #define op_avg(a, b) a = rnd_avg32(a,b)
373 #define op_put(a, b) a = b
375 PIXOP2(avg, op_avg)
376 PIXOP2(put, op_put)
377 #undef op_avg
378 #undef op_put
/* Scalar rounding averages of 2 and 4 pixel values, used by gmc below. */
380 #define avg2(a,b) ((a+b+1)>>1)
381 #define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
/*
 * gmc1_c: global motion compensation with a single motion vector.
 * x16/y16 are the 1/16-pel fractional offsets; A..D are the bilinear
 * weights (their sum is 16*16 == 256, hence the >>8 after adding
 * rounder).  Processes an 8-pixel-wide block, h rows.  t0..t3 pipeline
 * the two source rows so each source pixel is loaded only once per
 * output row, alternating which pair holds the "previous" column.
 */
384 static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
386 const int A=(16-x16)*(16-y16);
387 const int B=( x16)*(16-y16);
388 const int C=(16-x16)*( y16);
389 const int D=( x16)*( y16);
391 do {
392 int t0,t1,t2,t3;
393 uint8_t *s0 = src;
394 uint8_t *s1 = src+stride;
395 t0 = *s0++; t2 = *s1++;
396 t1 = *s0++; t3 = *s1++;
397 dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
398 t0 = *s0++; t2 = *s1++;
399 dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
400 t1 = *s0++; t3 = *s1++;
401 dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
402 t0 = *s0++; t2 = *s1++;
403 dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
404 t1 = *s0++; t3 = *s1++;
405 dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
406 t0 = *s0++; t2 = *s1++;
407 dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
408 t1 = *s0++; t3 = *s1++;
409 dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
410 t0 = *s0++; t2 = *s1++;
411 dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
412 dst+= stride;
413 src+= stride;
414 }while(--h);
/*
 * gmc_c: global motion compensation with an affine transform.
 * (ox,oy) is the fixed-point source position of the block origin;
 * dxx/dyx advance it per output column, dxy/dyy per output line;
 * shift gives the sub-pel precision (s = 1<<shift), r the rounding bias.
 * For each output pixel the integer source position is range-checked
 * against [0,width]x[0,height] (width/height are pre-decremented) and
 * bilinear interpolation degrades to 1-D or nearest when the 2x2
 * neighbourhood would fall outside, with av_clip on the clamped axis.
 * NOTE(review): frac_x/frac_y are masked from the pre-shift vx>>16
 * value, matching the generic C implementation -- confirm against
 * libavcodec/dsputil.c before changing.
 */
417 static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
418 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
420 int y, vx, vy;
421 const int s= 1<<shift;
423 width--;
424 height--;
426 for(y=0; y<h; y++){
427 int x;
429 vx= ox;
430 vy= oy;
431 for(x=0; x<8; x++){ //XXX FIXME optimize
432 int src_x, src_y, frac_x, frac_y, index;
434 src_x= vx>>16;
435 src_y= vy>>16;
436 frac_x= src_x&(s-1);
437 frac_y= src_y&(s-1);
438 src_x>>=shift;
439 src_y>>=shift;
441 if((unsigned)src_x < width){
442 if((unsigned)src_y < height){
443 index= src_x + src_y*stride;
444 dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
445 + src[index +1]* frac_x )*(s-frac_y)
446 + ( src[index+stride ]*(s-frac_x)
447 + src[index+stride+1]* frac_x )* frac_y
448 + r)>>(shift*2);
449 }else{
450 index= src_x + av_clip(src_y, 0, height)*stride;
451 dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
452 + src[index +1]* frac_x )*s
453 + r)>>(shift*2);
455 }else{
456 if((unsigned)src_y < height){
457 index= av_clip(src_x, 0, width) + src_y*stride;
458 dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
459 + src[index+stride ]* frac_y )*s
460 + r)>>(shift*2);
461 }else{
462 index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
463 dst[y*stride + x]= src[index ];
467 vx+= dxx;
468 vy+= dyx;
470 ox += dxy;
471 oy += dyy;
/*
 * H264_CHROMA_MC(OPNAME, OP): expands to 2/4/8-pixel-wide H.264 chroma
 * motion compensation functions.  x,y are the 1/8-pel offsets; A..D are
 * the bilinear weights (A+B+C+D == 64; OP applies the (v+32)>>6
 * normalisation, see the op_put/op_avg definitions below the macro).
 * t0..t3 pipeline the two source rows as in gmc1_c, alternating which
 * pair holds the left column so each pixel is loaded once per row.
 */
474 #define H264_CHROMA_MC(OPNAME, OP)\
475 static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
476 const int A=(8-x)*(8-y);\
477 const int B=( x)*(8-y);\
478 const int C=(8-x)*( y);\
479 const int D=( x)*( y);\
481 assert(x<8 && y<8 && x>=0 && y>=0);\
483 do {\
484 int t0,t1,t2,t3; \
485 uint8_t *s0 = src; \
486 uint8_t *s1 = src+stride; \
487 t0 = *s0++; t2 = *s1++; \
488 t1 = *s0++; t3 = *s1++; \
489 OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
490 t0 = *s0++; t2 = *s1++; \
491 OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
492 dst+= stride;\
493 src+= stride;\
494 }while(--h);\
497 static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
498 const int A=(8-x)*(8-y);\
499 const int B=( x)*(8-y);\
500 const int C=(8-x)*( y);\
501 const int D=( x)*( y);\
503 assert(x<8 && y<8 && x>=0 && y>=0);\
505 do {\
506 int t0,t1,t2,t3; \
507 uint8_t *s0 = src; \
508 uint8_t *s1 = src+stride; \
509 t0 = *s0++; t2 = *s1++; \
510 t1 = *s0++; t3 = *s1++; \
511 OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
512 t0 = *s0++; t2 = *s1++; \
513 OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
514 t1 = *s0++; t3 = *s1++; \
515 OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
516 t0 = *s0++; t2 = *s1++; \
517 OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
518 dst+= stride;\
519 src+= stride;\
520 }while(--h);\
523 static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
524 const int A=(8-x)*(8-y);\
525 const int B=( x)*(8-y);\
526 const int C=(8-x)*( y);\
527 const int D=( x)*( y);\
529 assert(x<8 && y<8 && x>=0 && y>=0);\
531 do {\
532 int t0,t1,t2,t3; \
533 uint8_t *s0 = src; \
534 uint8_t *s1 = src+stride; \
535 t0 = *s0++; t2 = *s1++; \
536 t1 = *s0++; t3 = *s1++; \
537 OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
538 t0 = *s0++; t2 = *s1++; \
539 OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
540 t1 = *s0++; t3 = *s1++; \
541 OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
542 t0 = *s0++; t2 = *s1++; \
543 OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
544 t1 = *s0++; t3 = *s1++; \
545 OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
546 t0 = *s0++; t2 = *s1++; \
547 OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
548 t1 = *s0++; t3 = *s1++; \
549 OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
550 t0 = *s0++; t2 = *s1++; \
551 OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
552 dst+= stride;\
553 src+= stride;\
554 }while(--h);\
/* Instantiate the chroma MC family: put_ stores the normalised value
 * ((v+32)>>6); avg_ rounds it together with the existing dst pixel. */
557 #define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
558 #define op_put(a, b) a = (((b) + 32)>>6)
560 H264_CHROMA_MC(put_ , op_put)
561 H264_CHROMA_MC(avg_ , op_avg)
562 #undef op_avg
563 #undef op_put
565 #define QPEL_MC(r, OPNAME, RND, OP) \
566 static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
567 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
568 do {\
569 uint8_t *s = src; \
570 int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
571 src0= *s++;\
572 src1= *s++;\
573 src2= *s++;\
574 src3= *s++;\
575 src4= *s++;\
576 OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
577 src5= *s++;\
578 OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
579 src6= *s++;\
580 OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
581 src7= *s++;\
582 OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
583 src8= *s++;\
584 OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
585 OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
586 OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
587 OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
588 dst+=dstStride;\
589 src+=srcStride;\
590 }while(--h);\
593 static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
594 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
595 int w=8;\
596 do{\
597 uint8_t *s = src, *d=dst;\
598 int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
599 src0 = *s; s+=srcStride; \
600 src1 = *s; s+=srcStride; \
601 src2 = *s; s+=srcStride; \
602 src3 = *s; s+=srcStride; \
603 src4 = *s; s+=srcStride; \
604 OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
605 src5 = *s; s+=srcStride; \
606 OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
607 src6 = *s; s+=srcStride; \
608 OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
609 src7 = *s; s+=srcStride; \
610 OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
611 src8 = *s; \
612 OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
613 OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
614 OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
615 OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
616 dst++;\
617 src++;\
618 }while(--w);\
621 static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
622 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
623 do {\
624 uint8_t *s = src;\
625 int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
626 int src9,src10,src11,src12,src13,src14,src15,src16;\
627 src0= *s++;\
628 src1= *s++;\
629 src2= *s++;\
630 src3= *s++;\
631 src4= *s++;\
632 OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
633 src5= *s++;\
634 OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
635 src6= *s++;\
636 OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
637 src7= *s++;\
638 OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
639 src8= *s++;\
640 OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
641 src9= *s++;\
642 OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
643 src10= *s++;\
644 OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
645 src11= *s++;\
646 OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
647 src12= *s++;\
648 OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
649 src13= *s++;\
650 OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
651 src14= *s++;\
652 OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
653 src15= *s++;\
654 OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
655 src16= *s++;\
656 OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
657 OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
658 OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
659 OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
660 dst+=dstStride;\
661 src+=srcStride;\
662 }while(--h);\
665 static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
666 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
667 int w=16;\
668 do {\
669 uint8_t *s = src, *d=dst;\
670 int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
671 int src9,src10,src11,src12,src13,src14,src15,src16;\
672 src0 = *s; s+=srcStride; \
673 src1 = *s; s+=srcStride; \
674 src2 = *s; s+=srcStride; \
675 src3 = *s; s+=srcStride; \
676 src4 = *s; s+=srcStride; \
677 OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
678 src5 = *s; s+=srcStride; \
679 OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
680 src6 = *s; s+=srcStride; \
681 OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
682 src7 = *s; s+=srcStride; \
683 OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
684 src8 = *s; s+=srcStride; \
685 OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
686 src9 = *s; s+=srcStride; \
687 OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
688 src10 = *s; s+=srcStride; \
689 OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
690 src11 = *s; s+=srcStride; \
691 OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
692 src12 = *s; s+=srcStride; \
693 OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
694 src13 = *s; s+=srcStride; \
695 OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
696 src14 = *s; s+=srcStride; \
697 OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
698 src15 = *s; s+=srcStride; \
699 OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
700 src16 = *s; \
701 OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
702 OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
703 OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
704 OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
705 dst++;\
706 src++;\
707 }while(--w);\
710 static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
711 OPNAME ## pixels8_c(dst, src, stride, 8);\
714 static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
715 uint8_t half[64];\
716 put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
717 OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
720 static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
721 OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
724 static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
725 uint8_t half[64];\
726 put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
727 OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
730 static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
731 uint8_t full[16*9];\
732 uint8_t half[64];\
733 copy_block9(full, src, 16, stride, 9);\
734 put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
735 OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
738 static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
739 uint8_t full[16*9];\
740 copy_block9(full, src, 16, stride, 9);\
741 OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
744 static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
745 uint8_t full[16*9];\
746 uint8_t half[64];\
747 copy_block9(full, src, 16, stride, 9);\
748 put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
749 OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
751 static void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
752 uint8_t full[16*9];\
753 uint8_t halfH[72];\
754 uint8_t halfV[64];\
755 uint8_t halfHV[64];\
756 copy_block9(full, src, 16, stride, 9);\
757 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
758 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
759 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
760 OPNAME ## pixels8_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
762 static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
763 uint8_t full[16*9];\
764 uint8_t halfH[72];\
765 uint8_t halfHV[64];\
766 copy_block9(full, src, 16, stride, 9);\
767 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
768 put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
769 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
770 OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
772 static void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
773 uint8_t full[16*9];\
774 uint8_t halfH[72];\
775 uint8_t halfV[64];\
776 uint8_t halfHV[64];\
777 copy_block9(full, src, 16, stride, 9);\
778 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
779 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
780 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
781 OPNAME ## pixels8_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
783 static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
784 uint8_t full[16*9];\
785 uint8_t halfH[72];\
786 uint8_t halfHV[64];\
787 copy_block9(full, src, 16, stride, 9);\
788 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
789 put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
790 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
791 OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
793 static void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
794 uint8_t full[16*9];\
795 uint8_t halfH[72];\
796 uint8_t halfV[64];\
797 uint8_t halfHV[64];\
798 copy_block9(full, src, 16, stride, 9);\
799 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
800 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
801 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
802 OPNAME ## pixels8_l4_aligned(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
804 static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
805 uint8_t full[16*9];\
806 uint8_t halfH[72];\
807 uint8_t halfHV[64];\
808 copy_block9(full, src, 16, stride, 9);\
809 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
810 put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
811 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
812 OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
814 static void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
815 uint8_t full[16*9];\
816 uint8_t halfH[72];\
817 uint8_t halfV[64];\
818 uint8_t halfHV[64];\
819 copy_block9(full, src, 16, stride, 9);\
820 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
821 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
822 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
823 OPNAME ## pixels8_l4_aligned0(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
825 static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
826 uint8_t full[16*9];\
827 uint8_t halfH[72];\
828 uint8_t halfHV[64];\
829 copy_block9(full, src, 16, stride, 9);\
830 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
831 put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
832 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
833 OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
835 static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
836 uint8_t halfH[72];\
837 uint8_t halfHV[64];\
838 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
839 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
840 OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
842 static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
843 uint8_t halfH[72];\
844 uint8_t halfHV[64];\
845 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
846 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
847 OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
849 static void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
850 uint8_t full[16*9];\
851 uint8_t halfH[72];\
852 uint8_t halfV[64];\
853 uint8_t halfHV[64];\
854 copy_block9(full, src, 16, stride, 9);\
855 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
856 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
857 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
858 OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
860 static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
861 uint8_t full[16*9];\
862 uint8_t halfH[72];\
863 copy_block9(full, src, 16, stride, 9);\
864 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
865 put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
866 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
868 static void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
869 uint8_t full[16*9];\
870 uint8_t halfH[72];\
871 uint8_t halfV[64];\
872 uint8_t halfHV[64];\
873 copy_block9(full, src, 16, stride, 9);\
874 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
875 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
876 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
877 OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
879 static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
880 uint8_t full[16*9];\
881 uint8_t halfH[72];\
882 copy_block9(full, src, 16, stride, 9);\
883 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
884 put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
885 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
887 static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
888 uint8_t halfH[72];\
889 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
890 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
892 static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
893 OPNAME ## pixels16_c(dst, src, stride, 16);\
896 static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
897 uint8_t half[256];\
898 put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
899 OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
902 static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
903 OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
906 static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
907 uint8_t half[256];\
908 put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
909 OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
912 static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
913 uint8_t full[24*17];\
914 uint8_t half[256];\
915 copy_block17(full, src, 24, stride, 17);\
916 put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
917 OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
920 static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
921 uint8_t full[24*17];\
922 copy_block17(full, src, 24, stride, 17);\
923 OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
926 static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
927 uint8_t full[24*17];\
928 uint8_t half[256];\
929 copy_block17(full, src, 24, stride, 17);\
930 put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
931 OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
933 static void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
934 uint8_t full[24*17];\
935 uint8_t halfH[272];\
936 uint8_t halfV[256];\
937 uint8_t halfHV[256];\
938 copy_block17(full, src, 24, stride, 17);\
939 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
940 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
941 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
942 OPNAME ## pixels16_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
944 static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
945 uint8_t full[24*17];\
946 uint8_t halfH[272];\
947 uint8_t halfHV[256];\
948 copy_block17(full, src, 24, stride, 17);\
949 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
950 put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
951 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
952 OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
954 static void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
955 uint8_t full[24*17];\
956 uint8_t halfH[272];\
957 uint8_t halfV[256];\
958 uint8_t halfHV[256];\
959 copy_block17(full, src, 24, stride, 17);\
960 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
961 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
962 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
963 OPNAME ## pixels16_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
965 static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
966 uint8_t full[24*17];\
967 uint8_t halfH[272];\
968 uint8_t halfHV[256];\
969 copy_block17(full, src, 24, stride, 17);\
970 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
971 put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
972 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
973 OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
975 static void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
976 uint8_t full[24*17];\
977 uint8_t halfH[272];\
978 uint8_t halfV[256];\
979 uint8_t halfHV[256];\
980 copy_block17(full, src, 24, stride, 17);\
981 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
982 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
983 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
984 OPNAME ## pixels16_l4_aligned(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
986 static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
987 uint8_t full[24*17];\
988 uint8_t halfH[272];\
989 uint8_t halfHV[256];\
990 copy_block17(full, src, 24, stride, 17);\
991 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
992 put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
993 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
994 OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
996 static void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
997 uint8_t full[24*17];\
998 uint8_t halfH[272];\
999 uint8_t halfV[256];\
1000 uint8_t halfHV[256];\
1001 copy_block17(full, src, 24, stride, 17);\
1002 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
1003 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
1004 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1005 OPNAME ## pixels16_l4_aligned0(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
1007 static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
1008 uint8_t full[24*17];\
1009 uint8_t halfH[272];\
1010 uint8_t halfHV[256];\
1011 copy_block17(full, src, 24, stride, 17);\
1012 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1013 put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
1014 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1015 OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
1017 static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
1018 uint8_t halfH[272];\
1019 uint8_t halfHV[256];\
1020 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
1021 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1022 OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
1024 static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
1025 uint8_t halfH[272];\
1026 uint8_t halfHV[256];\
1027 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
1028 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1029 OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
1031 static void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
1032 uint8_t full[24*17];\
1033 uint8_t halfH[272];\
1034 uint8_t halfV[256];\
1035 uint8_t halfHV[256];\
1036 copy_block17(full, src, 24, stride, 17);\
1037 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1038 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
1039 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1040 OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
1042 static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
1043 uint8_t full[24*17];\
1044 uint8_t halfH[272];\
1045 copy_block17(full, src, 24, stride, 17);\
1046 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1047 put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
1048 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
1050 static void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
1051 uint8_t full[24*17];\
1052 uint8_t halfH[272];\
1053 uint8_t halfV[256];\
1054 uint8_t halfHV[256];\
1055 copy_block17(full, src, 24, stride, 17);\
1056 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1057 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
1058 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1059 OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
1061 static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
1062 uint8_t full[24*17];\
1063 uint8_t halfH[272];\
1064 copy_block17(full, src, 24, stride, 17);\
1065 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1066 put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
1067 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
1069 static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
1070 uint8_t halfH[272];\
1071 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
1072 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
1075 #define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
1076 #define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
1077 #define op_put(a, b) a = cm[((b) + 16)>>5]
1078 #define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
1080 QPEL_MC(0, put_ , _ , op_put)
1081 QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
1082 QPEL_MC(0, avg_ , _ , op_avg)
1083 //QPEL_MC(1, avg_no_rnd , _ , op_avg)
1084 #undef op_avg
1085 #undef op_avg_no_rnd
1086 #undef op_put
1087 #undef op_put_no_rnd
1089 #if 1
#define H264_LOWPASS(OPNAME, OP, OP2) \
/* Horizontal 6-tap (1,-5,20,20,-5,1) half-pel filter over a w x h block; \
 * w is 4, 8 or 16 (the if(w>4)/if(w>8) ladders unroll the wider sizes).  \
 * OP stores one cropped result pixel. */ \
static inline void OPNAME ## h264_qpel_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    do {\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2;\
        srcB = *s++;\
        srcA = *s++;\
        src0 = *s++;\
        src1 = *s++;\
        src2 = *s++;\
        src3 = *s++;\
        OP(dst[0], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        src4 = *s++;\
        OP(dst[1], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        src5 = *s++;\
        OP(dst[2], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        src6 = *s++;\
        OP(dst[3], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        if (w>4) { /* it optimized */ \
            int src7,src8,src9,src10; \
            src7 = *s++;\
            OP(dst[4], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
            src8 = *s++;\
            OP(dst[5], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
            src9 = *s++;\
            OP(dst[6], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
            src10 = *s++;\
            OP(dst[7], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
            if (w>8) { \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s++;\
                OP(dst[8] , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
                src12 = *s++;\
                OP(dst[9] , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
                src13 = *s++;\
                OP(dst[10], (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
                src14 = *s++;\
                OP(dst[11], (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
                src15 = *s++;\
                OP(dst[12], (src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
                src16 = *s++;\
                OP(dst[13], (src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
                src17 = *s++;\
                OP(dst[14], (src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
                src18 = *s++;\
                OP(dst[15], (src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
            } \
        } \
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
\
/* Vertical 6-tap filter: same taps applied down each column; the outer \
 * loop walks columns (--w) and the unrolled body walks h rows. */ \
static inline void OPNAME ## h264_qpel_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    do{\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2*srcStride,*d=dst;\
        srcB = *s; s+=srcStride;\
        srcA = *s; s+=srcStride;\
        src0 = *s; s+=srcStride;\
        src1 = *s; s+=srcStride;\
        src2 = *s; s+=srcStride;\
        src3 = *s; s+=srcStride;\
        OP(*d, (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));d+=dstStride;\
        src4 = *s; s+=srcStride;\
        OP(*d, (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));d+=dstStride;\
        src5 = *s; s+=srcStride;\
        OP(*d, (src2+src3)*20 - (src1+src4)*5 + (src0+src5));d+=dstStride;\
        src6 = *s; s+=srcStride;\
        OP(*d, (src3+src4)*20 - (src2+src5)*5 + (src1+src6));d+=dstStride;\
        if (h>4) { \
            int src7,src8,src9,src10; \
            src7 = *s; s+=srcStride;\
            OP(*d, (src4+src5)*20 - (src3+src6)*5 + (src2+src7));d+=dstStride;\
            src8 = *s; s+=srcStride;\
            OP(*d, (src5+src6)*20 - (src4+src7)*5 + (src3+src8));d+=dstStride;\
            src9 = *s; s+=srcStride;\
            OP(*d, (src6+src7)*20 - (src5+src8)*5 + (src4+src9));d+=dstStride;\
            src10 = *s; s+=srcStride;\
            OP(*d, (src7+src8)*20 - (src6+src9)*5 + (src5+src10));d+=dstStride;\
            if (h>8) { \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s; s+=srcStride;\
                OP(*d , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));d+=dstStride;\
                src12 = *s; s+=srcStride;\
                OP(*d , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));d+=dstStride;\
                src13 = *s; s+=srcStride;\
                OP(*d, (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));d+=dstStride;\
                src14 = *s; s+=srcStride;\
                OP(*d, (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));d+=dstStride;\
                src15 = *s; s+=srcStride;\
                OP(*d, (src12+src13)*20 - (src11+src14)*5 + (src10+src15));d+=dstStride;\
                src16 = *s; s+=srcStride;\
                OP(*d, (src13+src14)*20 - (src12+src15)*5 + (src11+src16));d+=dstStride;\
                src17 = *s; s+=srcStride;\
                OP(*d, (src14+src15)*20 - (src13+src16)*5 + (src12+src17));d+=dstStride;\
                src18 = *s; s+=srcStride;\
                OP(*d, (src15+src16)*20 - (src14+src17)*5 + (src13+src18));d+=dstStride;\
            } \
        } \
        dst++;\
        src++;\
    }while(--w);\
}\
\
/* Separable 2-D filter: horizontal pass into the 16-bit tmp buffer      \
 * (h+5 rows, unclipped intermediates), then vertical pass over tmp with \
 * OP2, which rounds the 20-bit accumulated value (>>10) into dst. */ \
static inline void OPNAME ## h264_qpel_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride,int w,int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    src -= 2*srcStride;\
    i= h+5; \
    do {\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2;\
        srcB = *s++;\
        srcA = *s++;\
        src0 = *s++;\
        src1 = *s++;\
        src2 = *s++;\
        src3 = *s++;\
        tmp[0] = ((src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        src4 = *s++;\
        tmp[1] = ((src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        src5 = *s++;\
        tmp[2] = ((src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        src6 = *s++;\
        tmp[3] = ((src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        if (w>4) { /* it optimized */ \
            int src7,src8,src9,src10; \
            src7 = *s++;\
            tmp[4] = ((src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
            src8 = *s++;\
            tmp[5] = ((src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
            src9 = *s++;\
            tmp[6] = ((src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
            src10 = *s++;\
            tmp[7] = ((src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
            if (w>8) { \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s++;\
                tmp[8] = ((src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
                src12 = *s++;\
                tmp[9] = ((src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
                src13 = *s++;\
                tmp[10] = ((src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
                src14 = *s++;\
                tmp[11] = ((src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
                src15 = *s++;\
                tmp[12] = ((src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
                src16 = *s++;\
                tmp[13] = ((src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
                src17 = *s++;\
                tmp[14] = ((src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
                src18 = *s++;\
                tmp[15] = ((src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
            } \
        } \
        tmp+=tmpStride;\
        src+=srcStride;\
    }while(--i);\
    tmp -= tmpStride*(h+5-2);\
    i = w; \
    do {\
        int tmpB,tmpA,tmp0,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6;\
        int16_t *s = tmp-2*tmpStride; \
        uint8_t *d=dst;\
        tmpB = *s; s+=tmpStride;\
        tmpA = *s; s+=tmpStride;\
        tmp0 = *s; s+=tmpStride;\
        tmp1 = *s; s+=tmpStride;\
        tmp2 = *s; s+=tmpStride;\
        tmp3 = *s; s+=tmpStride;\
        OP2(*d, (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));d+=dstStride;\
        tmp4 = *s; s+=tmpStride;\
        OP2(*d, (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));d+=dstStride;\
        tmp5 = *s; s+=tmpStride;\
        OP2(*d, (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));d+=dstStride;\
        tmp6 = *s; s+=tmpStride;\
        OP2(*d, (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));d+=dstStride;\
        if (h>4) { \
            int tmp7,tmp8,tmp9,tmp10; \
            tmp7 = *s; s+=tmpStride;\
            OP2(*d, (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));d+=dstStride;\
            tmp8 = *s; s+=tmpStride;\
            OP2(*d, (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));d+=dstStride;\
            tmp9 = *s; s+=tmpStride;\
            OP2(*d, (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));d+=dstStride;\
            tmp10 = *s; s+=tmpStride;\
            OP2(*d, (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));d+=dstStride;\
            if (h>8) { \
                int tmp11,tmp12,tmp13,tmp14,tmp15,tmp16,tmp17,tmp18; \
                tmp11 = *s; s+=tmpStride;\
                OP2(*d , (tmp8 +tmp9 )*20 - (tmp7 +tmp10)*5 + (tmp6 +tmp11));d+=dstStride;\
                tmp12 = *s; s+=tmpStride;\
                OP2(*d , (tmp9 +tmp10)*20 - (tmp8 +tmp11)*5 + (tmp7 +tmp12));d+=dstStride;\
                tmp13 = *s; s+=tmpStride;\
                OP2(*d, (tmp10+tmp11)*20 - (tmp9 +tmp12)*5 + (tmp8 +tmp13));d+=dstStride;\
                tmp14 = *s; s+=tmpStride;\
                OP2(*d, (tmp11+tmp12)*20 - (tmp10+tmp13)*5 + (tmp9 +tmp14));d+=dstStride;\
                tmp15 = *s; s+=tmpStride;\
                OP2(*d, (tmp12+tmp13)*20 - (tmp11+tmp14)*5 + (tmp10+tmp15));d+=dstStride;\
                tmp16 = *s; s+=tmpStride;\
                OP2(*d, (tmp13+tmp14)*20 - (tmp12+tmp15)*5 + (tmp11+tmp16));d+=dstStride;\
                tmp17 = *s; s+=tmpStride;\
                OP2(*d, (tmp14+tmp15)*20 - (tmp13+tmp16)*5 + (tmp12+tmp17));d+=dstStride;\
                tmp18 = *s; s+=tmpStride;\
                OP2(*d, (tmp15+tmp16)*20 - (tmp14+tmp17)*5 + (tmp13+tmp18));d+=dstStride;\
            } \
        } \
        dst++;\
        tmp++;\
    }while(--i);\
}\
\
/* Fixed-size wrappers used by H264_MC below. */ \
static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,16,16); \
}
#define H264_MC(OPNAME, SIZE) \
/* Generates the 16 quarter-pel MC functions for a SIZE x SIZE block;  \
 * _mcXY_c handles the fractional offset (X/4, Y/4). */ \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src+1, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t half[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t half[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfV[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfV[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}
1471 #define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
1472 //#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
1473 #define op_put(a, b) a = cm[((b) + 16)>>5]
1474 #define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
1475 #define op2_put(a, b) a = cm[((b) + 512)>>10]
1477 H264_LOWPASS(put_ , op_put, op2_put)
1478 H264_LOWPASS(avg_ , op_avg, op2_avg)
1479 H264_MC(put_, 4)
1480 H264_MC(put_, 8)
1481 H264_MC(put_, 16)
1482 H264_MC(avg_, 4)
1483 H264_MC(avg_, 8)
1484 H264_MC(avg_, 16)
1486 #undef op_avg
1487 #undef op_put
1488 #undef op2_avg
1489 #undef op2_put
1490 #endif
1492 static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
1493 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
1496 int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
1497 uint8_t *s = src;
1498 src_1 = s[-1];
1499 src0 = *s++;
1500 src1 = *s++;
1501 src2 = *s++;
1502 dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
1503 src3 = *s++;
1504 dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
1505 src4 = *s++;
1506 dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
1507 src5 = *s++;
1508 dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
1509 src6 = *s++;
1510 dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
1511 src7 = *s++;
1512 dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
1513 src8 = *s++;
1514 dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
1515 src9 = *s++;
1516 dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
1517 dst+=dstStride;
1518 src+=srcStride;
1519 }while(--h);
1522 static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
1523 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
1526 int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
1527 uint8_t *s = src,*d = dst;
1528 src_1 = *(s-srcStride);
1529 src0 = *s; s+=srcStride;
1530 src1 = *s; s+=srcStride;
1531 src2 = *s; s+=srcStride;
1532 *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
1533 src3 = *s; s+=srcStride;
1534 *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
1535 src4 = *s; s+=srcStride;
1536 *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
1537 src5 = *s; s+=srcStride;
1538 *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
1539 src6 = *s; s+=srcStride;
1540 *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
1541 src7 = *s; s+=srcStride;
1542 *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
1543 src8 = *s; s+=srcStride;
1544 *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
1545 src9 = *s;
1546 *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
1547 src++;
1548 dst++;
1549 }while(--w);
/**
 * MSPEL motion compensation, integer-pel position (0,0):
 * a plain 8x8 block copy, no filtering.
 *
 * NOTE(review): closing brace restored (lost in extraction).
 */
static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
    put_pixels8_c(dst, src, stride, 8);
}
/**
 * MSPEL MC, horizontal sub-pel position left of center:
 * computes the horizontal half-pel plane into a temporary 8x8 buffer,
 * then averages it with the unshifted source.
 *
 * NOTE(review): closing brace restored (lost in extraction).
 */
static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t half[64]; /* 8x8 intermediate, stride 8 */
    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
}
/**
 * MSPEL MC, horizontal half-pel position:
 * the horizontal lowpass output is the final result, written directly
 * to dst with no extra averaging.
 *
 * NOTE(review): closing brace restored (lost in extraction).
 */
static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}
/**
 * MSPEL MC, horizontal sub-pel position right of center:
 * mirror of put_mspel8_mc10_c — averages the horizontal half-pel plane
 * with the source shifted one pixel right (src+1).
 *
 * NOTE(review): closing brace restored (lost in extraction).
 */
static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t half[64]; /* 8x8 intermediate, stride 8 */
    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
}
/**
 * MSPEL MC, vertical half-pel position:
 * the vertical lowpass output is the final result, written directly
 * to dst.
 *
 * NOTE(review): closing brace restored (lost in extraction).
 */
static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}
/**
 * MSPEL MC, combined horizontal+vertical sub-pel (left column):
 * builds the horizontal half-pel plane over 11 rows starting one row
 * above the block (halfH, 8x11 = 88 bytes), filters it vertically
 * (skipping the extra top row via halfH+8) to get the HV plane, and
 * averages that with the pure vertical half-pel plane of the source.
 *
 * NOTE(review): closing brace restored (lost in extraction).
 */
static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];  /* 8 wide x 11 rows, one extra row above/two below */
    uint8_t halfV[64];  /* vertical half-pel of src, 8x8 */
    uint8_t halfHV[64]; /* vertical half-pel of halfH, 8x8 */
    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
    put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
}
/**
 * MSPEL MC, combined horizontal+vertical sub-pel (right column):
 * same construction as put_mspel8_mc12_c, except the pure vertical
 * half-pel plane is taken from src+1 (one pixel to the right).
 *
 * NOTE(review): closing brace restored (lost in extraction).
 */
static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];  /* 8 wide x 11 rows, one extra row above/two below */
    uint8_t halfV[64];  /* vertical half-pel of src+1, 8x8 */
    uint8_t halfHV[64]; /* vertical half-pel of halfH, 8x8 */
    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
    put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
}
/**
 * MSPEL MC, half-pel in both directions:
 * horizontal lowpass over 11 rows (one above the block), then a
 * vertical lowpass of that intermediate (skipping the extra top row
 * via halfH+8) written straight to dst — no averaging step.
 *
 * NOTE(review): closing brace restored (lost in extraction).
 */
static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88]; /* 8 wide x 11 rows, one extra row above/two below */
    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
}