/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 *
 * DSP functions
 *
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "idctdsp.h"
#include "mathops.h"
#include "cavsdsp.h"
#include "libavutil/common.h"

/*****************************************************************************
 *
 * in-loop deblocking filter
 *
 ****************************************************************************/

#define P2 p0_p[-3*stride]
#define P1 p0_p[-2*stride]
#define P0 p0_p[-1*stride]
#define Q0 p0_p[ 0*stride]
#define Q1 p0_p[ 1*stride]
#define Q2 p0_p[ 2*stride]

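/* P2..P0 and Q0..Q2 name the three pixels on each side of the edge being
 * filtered; p0_p points at Q0 and stride is the step across the edge, so the
 * same helpers handle vertical edges (stride 1, used by cavs_filter_lv/cv)
 * and horizontal edges (stride = row stride, used by cavs_filter_lh/ch). */
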
static inline void loop_filter_l2(uint8_t *p0_p, ptrdiff_t stride, int alpha, int beta)
{
    int p0 = P0;
    int q0 = Q0;

    if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
        int s = p0 + q0 + 2;
        alpha = (alpha>>2) + 2;
        if(abs(P2-p0) < beta && abs(p0-q0) < alpha) {
            P0 = (P1 + p0 + s) >> 2;
            P1 = (2*P1 + s) >> 2;
        } else
            P0 = (2*P1 + s) >> 2;
        if(abs(Q2-q0) < beta && abs(q0-p0) < alpha) {
            Q0 = (Q1 + q0 + s) >> 2;
            Q1 = (2*Q1 + s) >> 2;
        } else
            Q0 = (2*Q1 + s) >> 2;
    }
}

static inline void loop_filter_l1(uint8_t *p0_p, ptrdiff_t stride, int alpha, int beta, int tc)
{
    int p0 = P0;
    int q0 = Q0;

    if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
        int delta = av_clip(((q0-p0)*3+P1-Q1+4)>>3,-tc, tc);
        P0 = av_clip_uint8(p0+delta);
        Q0 = av_clip_uint8(q0-delta);
        if(abs(P2-p0)<beta) {
            delta = av_clip(((P0-P1)*3+P2-Q0+4)>>3, -tc, tc);
            P1 = av_clip_uint8(P1+delta);
        }
        if(abs(Q2-q0)<beta) {
            delta = av_clip(((Q1-Q0)*3+P0-Q2+4)>>3, -tc, tc);
            Q1 = av_clip_uint8(Q1-delta);
        }
    }
}

static inline void loop_filter_c2(uint8_t *p0_p, ptrdiff_t stride, int alpha, int beta)
{
    int p0 = P0;
    int q0 = Q0;

    if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
        int s = p0 + q0 + 2;
        alpha = (alpha>>2) + 2;
        if(abs(P2-p0) < beta && abs(p0-q0) < alpha) {
            P0 = (P1 + p0 + s) >> 2;
        } else
            P0 = (2*P1 + s) >> 2;
        if(abs(Q2-q0) < beta && abs(q0-p0) < alpha) {
            Q0 = (Q1 + q0 + s) >> 2;
        } else
            Q0 = (2*Q1 + s) >> 2;
    }
}

static inline void loop_filter_c1(uint8_t *p0_p, ptrdiff_t stride, int alpha, int beta,
                                  int tc)
{
    if(abs(P0-Q0)<alpha && abs(P1-P0)<beta && abs(Q1-Q0)<beta) {
        int delta = av_clip(((Q0-P0)*3+P1-Q1+4)>>3, -tc, tc);
        P0 = av_clip_uint8(P0+delta);
        Q0 = av_clip_uint8(Q0-delta);
    }
}

#undef P0
#undef P1
#undef P2
#undef Q0
#undef Q1
#undef Q2

static void cavs_filter_lv_c(uint8_t *d, ptrdiff_t stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if(bs1==2)
        for(i=0;i<16;i++)
            loop_filter_l2(d + i*stride,1,alpha,beta);
    else {
        if(bs1)
            for(i=0;i<8;i++)
                loop_filter_l1(d + i*stride,1,alpha,beta,tc);
        if (bs2)
            for(i=8;i<16;i++)
                loop_filter_l1(d + i*stride,1,alpha,beta,tc);
    }
}

static void cavs_filter_lh_c(uint8_t *d, ptrdiff_t stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if(bs1==2)
        for(i=0;i<16;i++)
            loop_filter_l2(d + i,stride,alpha,beta);
    else {
        if(bs1)
            for(i=0;i<8;i++)
                loop_filter_l1(d + i,stride,alpha,beta,tc);
        if (bs2)
            for(i=8;i<16;i++)
                loop_filter_l1(d + i,stride,alpha,beta,tc);
    }
}

static void cavs_filter_cv_c(uint8_t *d, ptrdiff_t stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if(bs1==2)
        for(i=0;i<8;i++)
            loop_filter_c2(d + i*stride,1,alpha,beta);
    else {
        if(bs1)
            for(i=0;i<4;i++)
                loop_filter_c1(d + i*stride,1,alpha,beta,tc);
        if (bs2)
            for(i=4;i<8;i++)
                loop_filter_c1(d + i*stride,1,alpha,beta,tc);
    }
}

static void cavs_filter_ch_c(uint8_t *d, ptrdiff_t stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if(bs1==2)
        for(i=0;i<8;i++)
            loop_filter_c2(d + i,stride,alpha,beta);
    else {
        if(bs1)
            for(i=0;i<4;i++)
                loop_filter_c1(d + i,stride,alpha,beta,tc);
        if (bs2)
            for(i=4;i<8;i++)
                loop_filter_c1(d + i,stride,alpha,beta,tc);
    }
}

/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/

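/* 8x8 inverse transform, done as two 1-D passes: the first loop transforms
 * the rows in place (results keep extra precision after >>3), the second
 * transforms the columns and adds the >>7-scaled result to the destination
 * with clipping.  The rounding offsets are applied in the row pass only
 * (the +8 on the DC coefficient and the +4 in a4/a5). */
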
static void cavs_idct8_add_c(uint8_t *dst, int16_t *block, ptrdiff_t stride)
{
    int i;
    int16_t (*src)[8] = (int16_t(*)[8])block;

    src[0][0] += 8;

    for( i = 0; i < 8; i++ ) {
        const int a0 = 3 * src[i][1] - 2 * src[i][7];
        const int a1 = 3 * src[i][3] + 2 * src[i][5];
        const int a2 = 2 * src[i][3] - 3 * src[i][5];
        const int a3 = 2 * src[i][1] + 3 * src[i][7];

        const int b4 = 2 * (a0 + a1 + a3) + a1;
        const int b5 = 2 * (a0 - a1 + a2) + a0;
        const int b6 = 2 * (a3 - a2 - a1) + a3;
        const int b7 = 2 * (a0 - a2 - a3) - a2;

        const int a7 = 4 * src[i][2] - 10 * src[i][6];
        const int a6 = 4 * src[i][6] + 10 * src[i][2];
        const int a5 = 8 * (src[i][0] - src[i][4]) + 4;
        const int a4 = 8 * (src[i][0] + src[i][4]) + 4;

        const int b0 = a4 + a6;
        const int b1 = a5 + a7;
        const int b2 = a5 - a7;
        const int b3 = a4 - a6;

        src[i][0] = (b0 + b4) >> 3;
        src[i][1] = (b1 + b5) >> 3;
        src[i][2] = (b2 + b6) >> 3;
        src[i][3] = (b3 + b7) >> 3;
        src[i][4] = (b3 - b7) >> 3;
        src[i][5] = (b2 - b6) >> 3;
        src[i][6] = (b1 - b5) >> 3;
        src[i][7] = (b0 - b4) >> 3;
    }
    for( i = 0; i < 8; i++ ) {
        const int a0 = 3 * src[1][i] - 2 * src[7][i];
        const int a1 = 3 * src[3][i] + 2 * src[5][i];
        const int a2 = 2 * src[3][i] - 3 * src[5][i];
        const int a3 = 2 * src[1][i] + 3 * src[7][i];

        const int b4 = 2 * (a0 + a1 + a3) + a1;
        const int b5 = 2 * (a0 - a1 + a2) + a0;
        const int b6 = 2 * (a3 - a2 - a1) + a3;
        const int b7 = 2 * (a0 - a2 - a3) - a2;

        const int a7 = 4 * src[2][i] - 10 * src[6][i];
        const int a6 = 4 * src[6][i] + 10 * src[2][i];
        const int a5 = 8 * (src[0][i] - src[4][i]);
        const int a4 = 8 * (src[0][i] + src[4][i]);

        const int b0 = a4 + a6;
        const int b1 = a5 + a7;
        const int b2 = a5 - a7;
        const int b3 = a4 - a6;

        dst[i + 0*stride] = av_clip_uint8( dst[i + 0*stride] + ((b0 + b4) >> 7));
        dst[i + 1*stride] = av_clip_uint8( dst[i + 1*stride] + ((b1 + b5) >> 7));
        dst[i + 2*stride] = av_clip_uint8( dst[i + 2*stride] + ((b2 + b6) >> 7));
        dst[i + 3*stride] = av_clip_uint8( dst[i + 3*stride] + ((b3 + b7) >> 7));
        dst[i + 4*stride] = av_clip_uint8( dst[i + 4*stride] + ((b3 - b7) >> 7));
        dst[i + 5*stride] = av_clip_uint8( dst[i + 5*stride] + ((b2 - b6) >> 7));
        dst[i + 6*stride] = av_clip_uint8( dst[i + 6*stride] + ((b1 - b5) >> 7));
        dst[i + 7*stride] = av_clip_uint8( dst[i + 7*stride] + ((b0 - b4) >> 7));
    }
}

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/

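/* Sub-pel interpolation uses 6-tap filters.  The generated _mcXY functions
 * correspond to quarter-pel position (X, Y) within a pixel: the single-pass
 * _h_/_v_ filters cover positions with one zero coordinate, while the _hv_
 * variants filter horizontally into a 16-bit temporary buffer and then
 * vertically; the egpr case additionally averages that result with a nearby
 * integer-pel sample (src2) for the diagonal quarter-pel positions. */
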
#define CAVS_SUBPIX(OPNAME, OP, NAME, A, B, C, D, E, F) \
static void OPNAME ## cavs_filt8_h_ ## NAME(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    const int h=8;\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], A*src[-2] + B*src[-1] + C*src[0] + D*src[1] + E*src[2] + F*src[3]);\
        OP(dst[1], A*src[-1] + B*src[ 0] + C*src[1] + D*src[2] + E*src[3] + F*src[4]);\
        OP(dst[2], A*src[ 0] + B*src[ 1] + C*src[2] + D*src[3] + E*src[4] + F*src[5]);\
        OP(dst[3], A*src[ 1] + B*src[ 2] + C*src[3] + D*src[4] + E*src[5] + F*src[6]);\
        OP(dst[4], A*src[ 2] + B*src[ 3] + C*src[4] + D*src[5] + E*src[6] + F*src[7]);\
        OP(dst[5], A*src[ 3] + B*src[ 4] + C*src[5] + D*src[6] + E*src[7] + F*src[8]);\
        OP(dst[6], A*src[ 4] + B*src[ 5] + C*src[6] + D*src[7] + E*src[8] + F*src[9]);\
        OP(dst[7], A*src[ 5] + B*src[ 6] + C*src[7] + D*src[8] + E*src[9] + F*src[10]);\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## cavs_filt8_v_ ## NAME(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    const int w=8;\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        const int src7= src[7 *srcStride];\
        const int src8= src[8 *srcStride];\
        const int src9= src[9 *srcStride];\
        const int src10= src[10 *srcStride];\
        OP(dst[0*dstStride], A*srcB + B*srcA + C*src0 + D*src1 + E*src2 + F*src3);\
        OP(dst[1*dstStride], A*srcA + B*src0 + C*src1 + D*src2 + E*src3 + F*src4);\
        OP(dst[2*dstStride], A*src0 + B*src1 + C*src2 + D*src3 + E*src4 + F*src5);\
        OP(dst[3*dstStride], A*src1 + B*src2 + C*src3 + D*src4 + E*src5 + F*src6);\
        OP(dst[4*dstStride], A*src2 + B*src3 + C*src4 + D*src5 + E*src6 + F*src7);\
        OP(dst[5*dstStride], A*src3 + B*src4 + C*src5 + D*src6 + E*src7 + F*src8);\
        OP(dst[6*dstStride], A*src4 + B*src5 + C*src6 + D*src7 + E*src8 + F*src9);\
        OP(dst[7*dstStride], A*src5 + B*src6 + C*src7 + D*src8 + E*src9 + F*src10);\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## cavs_filt16_v_ ## NAME(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_filt8_v_ ## NAME(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_filt8_v_ ## NAME(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## cavs_filt16_h_ ## NAME(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_filt8_h_ ## NAME(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_filt8_h_ ## NAME(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\
}

#define CAVS_SUBPIX_HV(OPNAME, OP, NAME, AH, BH, CH, DH, EH, FH, AV, BV, CV, DV, EV, FV, FULL) \
static void OPNAME ## cavs_filt8_hv_ ## NAME(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    int16_t temp[8*(8+5)];\
    int16_t *tmp = temp;\
    const int h=8;\
    const int w=8;\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    src1 -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= AH*src1[-2] + BH*src1[-1] + CH*src1[0] + DH*src1[1] + EH*src1[2] + FH*src1[3];\
        tmp[1]= AH*src1[-1] + BH*src1[ 0] + CH*src1[1] + DH*src1[2] + EH*src1[3] + FH*src1[4];\
        tmp[2]= AH*src1[ 0] + BH*src1[ 1] + CH*src1[2] + DH*src1[3] + EH*src1[4] + FH*src1[5];\
        tmp[3]= AH*src1[ 1] + BH*src1[ 2] + CH*src1[3] + DH*src1[4] + EH*src1[5] + FH*src1[6];\
        tmp[4]= AH*src1[ 2] + BH*src1[ 3] + CH*src1[4] + DH*src1[5] + EH*src1[6] + FH*src1[7];\
        tmp[5]= AH*src1[ 3] + BH*src1[ 4] + CH*src1[5] + DH*src1[6] + EH*src1[7] + FH*src1[8];\
        tmp[6]= AH*src1[ 4] + BH*src1[ 5] + CH*src1[6] + DH*src1[7] + EH*src1[8] + FH*src1[9];\
        tmp[7]= AH*src1[ 5] + BH*src1[ 6] + CH*src1[7] + DH*src1[8] + EH*src1[9] + FH*src1[10];\
        tmp+=8;\
        src1+=srcStride;\
    }\
    if(FULL) {\
        tmp = temp+8*2; \
        for(i=0; i<w; i++) \
        { \
            const int tmpB= tmp[-2*8]; \
            const int tmpA= tmp[-1*8]; \
            const int tmp0= tmp[0 *8]; \
            const int tmp1= tmp[1 *8]; \
            const int tmp2= tmp[2 *8]; \
            const int tmp3= tmp[3 *8]; \
            const int tmp4= tmp[4 *8]; \
            const int tmp5= tmp[5 *8]; \
            const int tmp6= tmp[6 *8]; \
            const int tmp7= tmp[7 *8]; \
            const int tmp8= tmp[8 *8]; \
            const int tmp9= tmp[9 *8]; \
            const int tmp10=tmp[10*8]; \
            OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3 + 64*src2[0*srcStride]); \
            OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4 + 64*src2[1*srcStride]); \
            OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5 + 64*src2[2*srcStride]); \
            OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6 + 64*src2[3*srcStride]); \
            OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7 + 64*src2[4*srcStride]); \
            OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8 + 64*src2[5*srcStride]); \
            OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9 + 64*src2[6*srcStride]); \
            OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10 + 64*src2[7*srcStride]); \
            dst++; \
            tmp++; \
            src2++; \
        } \
    } else {\
        tmp = temp+8*2; \
        for(i=0; i<w; i++) \
        { \
            const int tmpB= tmp[-2*8]; \
            const int tmpA= tmp[-1*8]; \
            const int tmp0= tmp[0 *8]; \
            const int tmp1= tmp[1 *8]; \
            const int tmp2= tmp[2 *8]; \
            const int tmp3= tmp[3 *8]; \
            const int tmp4= tmp[4 *8]; \
            const int tmp5= tmp[5 *8]; \
            const int tmp6= tmp[6 *8]; \
            const int tmp7= tmp[7 *8]; \
            const int tmp8= tmp[8 *8]; \
            const int tmp9= tmp[9 *8]; \
            const int tmp10=tmp[10*8]; \
            OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3); \
            OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4); \
            OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5); \
            OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6); \
            OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7); \
            OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8); \
            OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9); \
            OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10); \
            dst++; \
            tmp++; \
        } \
    }\
}\
\
static void OPNAME ## cavs_filt16_hv_ ## NAME(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_filt8_hv_ ## NAME(dst  , src1,   FULL ? src2     : NULL, dstStride, srcStride); \
    OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, FULL ? src2 + 8 : NULL, dstStride, srcStride); \
    src1 += 8*srcStride;\
    if (FULL) \
        src2 += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_filt8_hv_ ## NAME(dst  , src1,   FULL ? src2     : NULL, dstStride, srcStride); \
    OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, FULL ? src2 + 8 : NULL, dstStride, srcStride); \
}

#define CAVS_MC(OPNAME, SIZE) \
static void OPNAME ## cavs_qpel ## SIZE ## _mc10_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _h_qpel_l(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _h_hpel(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc30_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _h_qpel_r(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _v_qpel_l(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _v_hpel(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _v_qpel_r(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc22_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_jj(dst, src, NULL, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc11_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc13_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc31_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+1, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride+1, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc21_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_ff(dst, src, NULL, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc12_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_ii(dst, src, NULL, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc32_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_kk(dst, src, NULL, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc23_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_qq(dst, src, NULL, stride, stride); \
}

#define op_put1(a, b)  a = cm[((b)+4)>>3]
#define op_put2(a, b)  a = cm[((b)+64)>>7]
#define op_put3(a, b)  a = cm[((b)+32)>>6]
#define op_put4(a, b)  a = cm[((b)+512)>>10]
#define op_avg1(a, b)  a = ((a)+cm[((b)+4)>>3]  +1)>>1
#define op_avg2(a, b)  a = ((a)+cm[((b)+64)>>7] +1)>>1
#define op_avg3(a, b)  a = ((a)+cm[((b)+32)>>6] +1)>>1
#define op_avg4(a, b)  a = ((a)+cm[((b)+512)>>10]+1)>>1

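/* The rounding and shift in each op_* variant match the total gain of the
 * filter pass it is paired with: one half-pel pass (-1,5,5,-1) sums to 8
 * (>>3), one quarter-pel pass (-1,-2,96,42,-7) sums to 128 (>>7), a 2-D
 * half-pel/half-pel pass to 64 (>>6), a 2-D quarter-pel/half-pel pass to
 * 1024 (>>10), and the egpr case to 64 plus a 64-weighted integer-pel
 * sample, i.e. 128 (>>7). */
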
CAVS_SUBPIX(put_   , op_put1, hpel,    0, -1,  5,  5, -1,  0)
CAVS_SUBPIX(put_   , op_put2, qpel_l, -1, -2, 96, 42, -7,  0)
CAVS_SUBPIX(put_   , op_put2, qpel_r,  0, -7, 42, 96, -2, -1)
CAVS_SUBPIX_HV(put_, op_put3, jj,      0, -1,  5,  5, -1,  0,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(put_, op_put4, ff,      0, -1,  5,  5, -1,  0, -1, -2, 96, 42, -7,  0, 0)
CAVS_SUBPIX_HV(put_, op_put4, ii,     -1, -2, 96, 42, -7,  0,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(put_, op_put4, kk,      0, -7, 42, 96, -2, -1,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(put_, op_put4, qq,      0, -1,  5,  5, -1,  0,  0, -7, 42, 96, -2, -1, 0)
CAVS_SUBPIX_HV(put_, op_put2, egpr,    0, -1,  5,  5, -1,  0,  0, -1,  5,  5, -1,  0, 1)
CAVS_SUBPIX(avg_   , op_avg1, hpel,    0, -1,  5,  5, -1,  0)
CAVS_SUBPIX(avg_   , op_avg2, qpel_l, -1, -2, 96, 42, -7,  0)
CAVS_SUBPIX(avg_   , op_avg2, qpel_r,  0, -7, 42, 96, -2, -1)
CAVS_SUBPIX_HV(avg_, op_avg3, jj,      0, -1,  5,  5, -1,  0,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, ff,      0, -1,  5,  5, -1,  0, -1, -2, 96, 42, -7,  0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, ii,     -1, -2, 96, 42, -7,  0,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, kk,      0, -7, 42, 96, -2, -1,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, qq,      0, -1,  5,  5, -1,  0,  0, -7, 42, 96, -2, -1, 0)
CAVS_SUBPIX_HV(avg_, op_avg2, egpr,    0, -1,  5,  5, -1,  0,  0, -1,  5,  5, -1,  0, 1)
CAVS_MC(put_, 8)
CAVS_MC(put_, 16)
CAVS_MC(avg_, 8)
CAVS_MC(avg_, 16)

#define put_cavs_qpel8_mc00_c  ff_put_pixels8x8_c
#define avg_cavs_qpel8_mc00_c  ff_avg_pixels8x8_c
#define put_cavs_qpel16_mc00_c ff_put_pixels16x16_c
#define avg_cavs_qpel16_mc00_c ff_avg_pixels16x16_c

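/* pixels_tab is indexed as 4*y + x for the quarter-pel fraction (x, y) of
 * the motion vector; entry 0 (full-pel) maps to the plain copy/average
 * helpers aliased above. */
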
av_cold void ff_cavsdsp_init(CAVSDSPContext* c)
{
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
    dspfunc(put_cavs_qpel, 0, 16);
    dspfunc(put_cavs_qpel, 1, 8);
    dspfunc(avg_cavs_qpel, 0, 16);
    dspfunc(avg_cavs_qpel, 1, 8);
    c->cavs_filter_lv = cavs_filter_lv_c;
    c->cavs_filter_lh = cavs_filter_lh_c;
    c->cavs_filter_cv = cavs_filter_cv_c;
    c->cavs_filter_ch = cavs_filter_ch_c;
    c->cavs_idct8_add = cavs_idct8_add_c;
    c->idct_perm = FF_IDCT_PERM_NONE;

#if ARCH_X86
    ff_cavsdsp_init_x86(c);
#endif
}