libavcodec/cavsdsp.c
/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * DSP functions
 *
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>

#include "idctdsp.h"
#include "mathops.h"
#include "cavsdsp.h"
#include "libavutil/common.h"

/*****************************************************************************
 *
 * in-loop deblocking filter
 *
 ****************************************************************************/

#define P2 p0_p[-3*stride]
#define P1 p0_p[-2*stride]
#define P0 p0_p[-1*stride]
#define Q0 p0_p[ 0*stride]
#define Q1 p0_p[ 1*stride]
#define Q2 p0_p[ 2*stride]
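
/* The P2..P0 / Q0..Q2 macros address the six pixels straddling the edge being
 * filtered: the P* samples lie before p0_p (negative multiples of stride),
 * the Q* samples at and after it. */

/* Strong (bs == 2) luma edge filter. */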
static inline void loop_filter_l2(uint8_t *p0_p, ptrdiff_t stride, int alpha, int beta)
{
    int p0 = P0;
    int q0 = Q0;

    if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
        int s = p0 + q0 + 2;
        alpha = (alpha>>2) + 2;
        if(abs(P2-p0) < beta && abs(p0-q0) < alpha) {
            P0 = (P1 + p0 + s) >> 2;
            P1 = (2*P1 + s) >> 2;
        } else
            P0 = (2*P1 + s) >> 2;
        if(abs(Q2-q0) < beta && abs(q0-p0) < alpha) {
            Q0 = (Q1 + q0 + s) >> 2;
            Q1 = (2*Q1 + s) >> 2;
        } else
            Q0 = (2*Q1 + s) >> 2;
    }
}
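
/* Normal (bs == 1) luma edge filter; corrections are clipped to +/-tc. */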
static inline void loop_filter_l1(uint8_t *p0_p, ptrdiff_t stride, int alpha, int beta, int tc)
{
    int p0 = P0;
    int q0 = Q0;

    if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
        int delta = av_clip(((q0-p0)*3+P1-Q1+4)>>3,-tc, tc);
        P0 = av_clip_uint8(p0+delta);
        Q0 = av_clip_uint8(q0-delta);
        if(abs(P2-p0)<beta) {
            delta = av_clip(((P0-P1)*3+P2-Q0+4)>>3, -tc, tc);
            P1 = av_clip_uint8(P1+delta);
        }
        if(abs(Q2-q0)<beta) {
            delta = av_clip(((Q1-Q0)*3+P0-Q2+4)>>3, -tc, tc);
            Q1 = av_clip_uint8(Q1-delta);
        }
    }
}
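
/* Strong (bs == 2) chroma edge filter; only P0/Q0 are modified. */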
static inline void loop_filter_c2(uint8_t *p0_p, ptrdiff_t stride, int alpha, int beta)
{
    int p0 = P0;
    int q0 = Q0;

    if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
        int s = p0 + q0 + 2;
        alpha = (alpha>>2) + 2;
        if(abs(P2-p0) < beta && abs(p0-q0) < alpha) {
            P0 = (P1 + p0 + s) >> 2;
        } else
            P0 = (2*P1 + s) >> 2;
        if(abs(Q2-q0) < beta && abs(q0-p0) < alpha) {
            Q0 = (Q1 + q0 + s) >> 2;
        } else
            Q0 = (2*Q1 + s) >> 2;
    }
}
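
/* Normal (bs == 1) chroma edge filter. */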
static inline void loop_filter_c1(uint8_t *p0_p, ptrdiff_t stride, int alpha, int beta,
                                  int tc)
{
    if(abs(P0-Q0)<alpha && abs(P1-P0)<beta && abs(Q1-Q0)<beta) {
        int delta = av_clip(((Q0-P0)*3+P1-Q1+4)>>3, -tc, tc);
        P0 = av_clip_uint8(P0+delta);
        Q0 = av_clip_uint8(Q0-delta);
    }
}

#undef P0
#undef P1
#undef P2
#undef Q0
#undef Q1
#undef Q2
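
/* Edge-filter entry points: lv/lh handle a vertical/horizontal luma edge
 * (16 pixels), cv/ch a chroma edge (8 pixels).  bs1 and bs2 give the boundary
 * strength of the two halves of the edge; bs1 == 2 selects the strong filter
 * for the whole edge. */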
static void cavs_filter_lv_c(uint8_t *d, ptrdiff_t stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if(bs1==2)
        for(i=0;i<16;i++)
            loop_filter_l2(d + i*stride,1,alpha,beta);
    else {
        if(bs1)
            for(i=0;i<8;i++)
                loop_filter_l1(d + i*stride,1,alpha,beta,tc);
        if (bs2)
            for(i=8;i<16;i++)
                loop_filter_l1(d + i*stride,1,alpha,beta,tc);
    }
}

static void cavs_filter_lh_c(uint8_t *d, ptrdiff_t stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if(bs1==2)
        for(i=0;i<16;i++)
            loop_filter_l2(d + i,stride,alpha,beta);
    else {
        if(bs1)
            for(i=0;i<8;i++)
                loop_filter_l1(d + i,stride,alpha,beta,tc);
        if (bs2)
            for(i=8;i<16;i++)
                loop_filter_l1(d + i,stride,alpha,beta,tc);
    }
}

static void cavs_filter_cv_c(uint8_t *d, ptrdiff_t stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if(bs1==2)
        for(i=0;i<8;i++)
            loop_filter_c2(d + i*stride,1,alpha,beta);
    else {
        if(bs1)
            for(i=0;i<4;i++)
                loop_filter_c1(d + i*stride,1,alpha,beta,tc);
        if (bs2)
            for(i=4;i<8;i++)
                loop_filter_c1(d + i*stride,1,alpha,beta,tc);
    }
}

static void cavs_filter_ch_c(uint8_t *d, ptrdiff_t stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if(bs1==2)
        for(i=0;i<8;i++)
            loop_filter_c2(d + i,stride,alpha,beta);
    else {
        if(bs1)
            for(i=0;i<4;i++)
                loop_filter_c1(d + i,stride,alpha,beta,tc);
        if (bs2)
            for(i=4;i<8;i++)
                loop_filter_c1(d + i,stride,alpha,beta,tc);
    }
}

/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/
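
/* 8x8 inverse transform.  The row pass rewrites the coefficient block in
 * place (>>3); the column pass produces the residual (>>7), which is added
 * to dst and clamped to 8 bits through the ff_crop_tab lookup. */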
static void cavs_idct8_add_c(uint8_t *dst, int16_t *block, ptrdiff_t stride)
{
    int i;
    int16_t (*src)[8] = (int16_t(*)[8])block;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;

    src[0][0] += 8;

    for( i = 0; i < 8; i++ ) {
        const int a0 = 3*src[i][1] - (src[i][7]<<1);
        const int a1 = 3*src[i][3] + (src[i][5]<<1);
        const int a2 = (src[i][3]<<1) - 3*src[i][5];
        const int a3 = (src[i][1]<<1) + 3*src[i][7];

        const int b4 = ((a0 + a1 + a3)<<1) + a1;
        const int b5 = ((a0 - a1 + a2)<<1) + a0;
        const int b6 = ((a3 - a2 - a1)<<1) + a3;
        const int b7 = ((a0 - a2 - a3)<<1) - a2;

        const int a7 = (src[i][2]<<2) - 10*src[i][6];
        const int a6 = (src[i][6]<<2) + 10*src[i][2];
        const int a5 = ((src[i][0] - src[i][4]) << 3) + 4;
        const int a4 = ((src[i][0] + src[i][4]) << 3) + 4;

        const int b0 = a4 + a6;
        const int b1 = a5 + a7;
        const int b2 = a5 - a7;
        const int b3 = a4 - a6;

        src[i][0] = (b0 + b4) >> 3;
        src[i][1] = (b1 + b5) >> 3;
        src[i][2] = (b2 + b6) >> 3;
        src[i][3] = (b3 + b7) >> 3;
        src[i][4] = (b3 - b7) >> 3;
        src[i][5] = (b2 - b6) >> 3;
        src[i][6] = (b1 - b5) >> 3;
        src[i][7] = (b0 - b4) >> 3;
    }
    for( i = 0; i < 8; i++ ) {
        const int a0 = 3*src[1][i] - (src[7][i]<<1);
        const int a1 = 3*src[3][i] + (src[5][i]<<1);
        const int a2 = (src[3][i]<<1) - 3*src[5][i];
        const int a3 = (src[1][i]<<1) + 3*src[7][i];

        const int b4 = ((a0 + a1 + a3)<<1) + a1;
        const int b5 = ((a0 - a1 + a2)<<1) + a0;
        const int b6 = ((a3 - a2 - a1)<<1) + a3;
        const int b7 = ((a0 - a2 - a3)<<1) - a2;

        const int a7 = (src[2][i]<<2) - 10*src[6][i];
        const int a6 = (src[6][i]<<2) + 10*src[2][i];
        const int a5 = (src[0][i] - src[4][i]) << 3;
        const int a4 = (src[0][i] + src[4][i]) << 3;

        const int b0 = a4 + a6;
        const int b1 = a5 + a7;
        const int b2 = a5 - a7;
        const int b3 = a4 - a6;

        dst[i + 0*stride] = cm[ dst[i + 0*stride] + ((b0 + b4) >> 7)];
        dst[i + 1*stride] = cm[ dst[i + 1*stride] + ((b1 + b5) >> 7)];
        dst[i + 2*stride] = cm[ dst[i + 2*stride] + ((b2 + b6) >> 7)];
        dst[i + 3*stride] = cm[ dst[i + 3*stride] + ((b3 + b7) >> 7)];
        dst[i + 4*stride] = cm[ dst[i + 4*stride] + ((b3 - b7) >> 7)];
        dst[i + 5*stride] = cm[ dst[i + 5*stride] + ((b2 - b6) >> 7)];
        dst[i + 6*stride] = cm[ dst[i + 6*stride] + ((b1 - b5) >> 7)];
        dst[i + 7*stride] = cm[ dst[i + 7*stride] + ((b0 - b4) >> 7)];
    }
}

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/
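
/* CAVS_SUBPIX generates 6-tap horizontal and vertical sub-pixel filters for
 * 8- and 16-wide blocks.  A..F are the tap weights; OP does the final
 * rounding/shift (and averaging for the avg_ variants). */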
#define CAVS_SUBPIX(OPNAME, OP, NAME, A, B, C, D, E, F) \
static void OPNAME ## cavs_filt8_h_ ## NAME(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    const int h=8;\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], A*src[-2] + B*src[-1] + C*src[0] + D*src[1] + E*src[2] + F*src[3]);\
        OP(dst[1], A*src[-1] + B*src[ 0] + C*src[1] + D*src[2] + E*src[3] + F*src[4]);\
        OP(dst[2], A*src[ 0] + B*src[ 1] + C*src[2] + D*src[3] + E*src[4] + F*src[5]);\
        OP(dst[3], A*src[ 1] + B*src[ 2] + C*src[3] + D*src[4] + E*src[5] + F*src[6]);\
        OP(dst[4], A*src[ 2] + B*src[ 3] + C*src[4] + D*src[5] + E*src[6] + F*src[7]);\
        OP(dst[5], A*src[ 3] + B*src[ 4] + C*src[5] + D*src[6] + E*src[7] + F*src[8]);\
        OP(dst[6], A*src[ 4] + B*src[ 5] + C*src[6] + D*src[7] + E*src[8] + F*src[9]);\
        OP(dst[7], A*src[ 5] + B*src[ 6] + C*src[7] + D*src[8] + E*src[9] + F*src[10]);\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## cavs_filt8_v_ ## NAME(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    const int w=8;\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        const int src7= src[7 *srcStride];\
        const int src8= src[8 *srcStride];\
        const int src9= src[9 *srcStride];\
        const int src10= src[10 *srcStride];\
        OP(dst[0*dstStride], A*srcB + B*srcA + C*src0 + D*src1 + E*src2 + F*src3);\
        OP(dst[1*dstStride], A*srcA + B*src0 + C*src1 + D*src2 + E*src3 + F*src4);\
        OP(dst[2*dstStride], A*src0 + B*src1 + C*src2 + D*src3 + E*src4 + F*src5);\
        OP(dst[3*dstStride], A*src1 + B*src2 + C*src3 + D*src4 + E*src5 + F*src6);\
        OP(dst[4*dstStride], A*src2 + B*src3 + C*src4 + D*src5 + E*src6 + F*src7);\
        OP(dst[5*dstStride], A*src3 + B*src4 + C*src5 + D*src6 + E*src7 + F*src8);\
        OP(dst[6*dstStride], A*src4 + B*src5 + C*src6 + D*src7 + E*src8 + F*src9);\
        OP(dst[7*dstStride], A*src5 + B*src6 + C*src7 + D*src8 + E*src9 + F*src10);\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## cavs_filt16_v_ ## NAME(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_filt8_v_ ## NAME(dst , src , dstStride, srcStride);\
    OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_filt8_v_ ## NAME(dst , src , dstStride, srcStride);\
    OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## cavs_filt16_h_ ## NAME(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_filt8_h_ ## NAME(dst , src , dstStride, srcStride);\
    OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_filt8_h_ ## NAME(dst , src , dstStride, srcStride);\
    OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\
}
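
/* CAVS_SUBPIX_HV builds the 2-D sub-pixel filters: a horizontal 6-tap pass
 * into a 16-bit temporary buffer followed by a vertical 6-tap pass.  With
 * FULL set, 64*src2 is added so the result is averaged with a second
 * prediction. */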
#define CAVS_SUBPIX_HV(OPNAME, OP, NAME, AH, BH, CH, DH, EH, FH, AV, BV, CV, DV, EV, FV, FULL) \
static void OPNAME ## cavs_filt8_hv_ ## NAME(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    int16_t temp[8*(8+5)];\
    int16_t *tmp = temp;\
    const int h=8;\
    const int w=8;\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    src1 -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= AH*src1[-2] + BH*src1[-1] + CH*src1[0] + DH*src1[1] + EH*src1[2] + FH*src1[3];\
        tmp[1]= AH*src1[-1] + BH*src1[ 0] + CH*src1[1] + DH*src1[2] + EH*src1[3] + FH*src1[4];\
        tmp[2]= AH*src1[ 0] + BH*src1[ 1] + CH*src1[2] + DH*src1[3] + EH*src1[4] + FH*src1[5];\
        tmp[3]= AH*src1[ 1] + BH*src1[ 2] + CH*src1[3] + DH*src1[4] + EH*src1[5] + FH*src1[6];\
        tmp[4]= AH*src1[ 2] + BH*src1[ 3] + CH*src1[4] + DH*src1[5] + EH*src1[6] + FH*src1[7];\
        tmp[5]= AH*src1[ 3] + BH*src1[ 4] + CH*src1[5] + DH*src1[6] + EH*src1[7] + FH*src1[8];\
        tmp[6]= AH*src1[ 4] + BH*src1[ 5] + CH*src1[6] + DH*src1[7] + EH*src1[8] + FH*src1[9];\
        tmp[7]= AH*src1[ 5] + BH*src1[ 6] + CH*src1[7] + DH*src1[8] + EH*src1[9] + FH*src1[10];\
        tmp+=8;\
        src1+=srcStride;\
    }\
    if(FULL) {\
        tmp = temp+8*2; \
        for(i=0; i<w; i++) \
        {\
            const int tmpB= tmp[-2*8]; \
            const int tmpA= tmp[-1*8]; \
            const int tmp0= tmp[0 *8]; \
            const int tmp1= tmp[1 *8]; \
            const int tmp2= tmp[2 *8]; \
            const int tmp3= tmp[3 *8]; \
            const int tmp4= tmp[4 *8]; \
            const int tmp5= tmp[5 *8]; \
            const int tmp6= tmp[6 *8]; \
            const int tmp7= tmp[7 *8]; \
            const int tmp8= tmp[8 *8]; \
            const int tmp9= tmp[9 *8]; \
            const int tmp10=tmp[10*8]; \
            OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3 + 64*src2[0*srcStride]); \
            OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4 + 64*src2[1*srcStride]); \
            OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5 + 64*src2[2*srcStride]); \
            OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6 + 64*src2[3*srcStride]); \
            OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7 + 64*src2[4*srcStride]); \
            OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8 + 64*src2[5*srcStride]); \
            OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9 + 64*src2[6*srcStride]); \
            OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10 + 64*src2[7*srcStride]); \
            dst++; \
            tmp++; \
            src2++; \
        }\
    } else {\
        tmp = temp+8*2; \
        for(i=0; i<w; i++) \
        {\
            const int tmpB= tmp[-2*8]; \
            const int tmpA= tmp[-1*8]; \
            const int tmp0= tmp[0 *8]; \
            const int tmp1= tmp[1 *8]; \
            const int tmp2= tmp[2 *8]; \
            const int tmp3= tmp[3 *8]; \
            const int tmp4= tmp[4 *8]; \
            const int tmp5= tmp[5 *8]; \
            const int tmp6= tmp[6 *8]; \
            const int tmp7= tmp[7 *8]; \
            const int tmp8= tmp[8 *8]; \
            const int tmp9= tmp[9 *8]; \
            const int tmp10=tmp[10*8]; \
            OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3); \
            OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4); \
            OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5); \
            OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6); \
            OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7); \
            OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8); \
            OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9); \
            OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10); \
            dst++; \
            tmp++; \
        }\
    }\
}\
\
static void OPNAME ## cavs_filt16_hv_ ## NAME(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_filt8_hv_ ## NAME(dst , src1, src2 , dstStride, srcStride); \
    OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, src2+8, dstStride, srcStride); \
    src1 += 8*srcStride;\
    src2 += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_filt8_hv_ ## NAME(dst , src1, src2 , dstStride, srcStride); \
    OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, src2+8, dstStride, srcStride); \
}
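
/* CAVS_MC instantiates the 16 quarter-pel positions (mcXY, X/Y = 0..3) for a
 * given block size from the filters generated above. */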
#define CAVS_MC(OPNAME, SIZE) \
static void OPNAME ## cavs_qpel ## SIZE ## _mc10_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _h_qpel_l(dst, src, stride, stride);\
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _h_hpel(dst, src, stride, stride);\
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc30_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _h_qpel_r(dst, src, stride, stride);\
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _v_qpel_l(dst, src, stride, stride);\
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _v_hpel(dst, src, stride, stride);\
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _v_qpel_r(dst, src, stride, stride);\
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc22_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_jj(dst, src, NULL, stride, stride); \
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc11_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src, stride, stride); \
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc13_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride, stride, stride); \
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc31_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+1, stride, stride); \
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride+1, stride, stride); \
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc21_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_ff(dst, src, src+stride+1, stride, stride); \
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc12_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_ii(dst, src, src+stride+1, stride, stride); \
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc32_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_kk(dst, src, src+stride+1, stride, stride); \
}\
static void OPNAME ## cavs_qpel ## SIZE ## _mc23_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_qq(dst, src, src+stride+1, stride, stride); \
}
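
/* Rounding/averaging ops; the shift matches the gain of the preceding filter
 * chain: 8 for the half-pel taps, 128 for the quarter-pel taps, 64 and 1024
 * for the separable 2-D cases. */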
#define op_put1(a, b) a = cm[((b)+4)>>3]
#define op_put2(a, b) a = cm[((b)+64)>>7]
#define op_put3(a, b) a = cm[((b)+32)>>6]
#define op_put4(a, b) a = cm[((b)+512)>>10]
#define op_avg1(a, b) a = ((a)+cm[((b)+4)>>3] +1)>>1
#define op_avg2(a, b) a = ((a)+cm[((b)+64)>>7] +1)>>1
#define op_avg3(a, b) a = ((a)+cm[((b)+32)>>6] +1)>>1
#define op_avg4(a, b) a = ((a)+cm[((b)+512)>>10]+1)>>1
CAVS_SUBPIX(put_ , op_put1, hpel, 0, -1, 5, 5, -1, 0)
CAVS_SUBPIX(put_ , op_put2, qpel_l, -1, -2, 96, 42, -7, 0)
CAVS_SUBPIX(put_ , op_put2, qpel_r, 0, -7, 42, 96, -2, -1)
CAVS_SUBPIX_HV(put_, op_put3, jj, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 0)
CAVS_SUBPIX_HV(put_, op_put4, ff, 0, -1, 5, 5, -1, 0, -1, -2, 96, 42, -7, 0, 0)
CAVS_SUBPIX_HV(put_, op_put4, ii, -1, -2, 96, 42, -7, 0, 0, -1, 5, 5, -1, 0, 0)
CAVS_SUBPIX_HV(put_, op_put4, kk, 0, -7, 42, 96, -2, -1, 0, -1, 5, 5, -1, 0, 0)
CAVS_SUBPIX_HV(put_, op_put4, qq, 0, -1, 5, 5, -1, 0, 0, -7, 42, 96, -2, -1, 0)
CAVS_SUBPIX_HV(put_, op_put2, egpr, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 1)
CAVS_SUBPIX(avg_ , op_avg1, hpel, 0, -1, 5, 5, -1, 0)
CAVS_SUBPIX(avg_ , op_avg2, qpel_l, -1, -2, 96, 42, -7, 0)
CAVS_SUBPIX(avg_ , op_avg2, qpel_r, 0, -7, 42, 96, -2, -1)
CAVS_SUBPIX_HV(avg_, op_avg3, jj, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, ff, 0, -1, 5, 5, -1, 0, -1, -2, 96, 42, -7, 0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, ii, -1, -2, 96, 42, -7, 0, 0, -1, 5, 5, -1, 0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, kk, 0, -7, 42, 96, -2, -1, 0, -1, 5, 5, -1, 0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, qq, 0, -1, 5, 5, -1, 0, 0, -7, 42, 96, -2, -1, 0)
CAVS_SUBPIX_HV(avg_, op_avg2, egpr, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 1)
CAVS_MC(put_, 8)
CAVS_MC(put_, 16)
CAVS_MC(avg_, 8)
CAVS_MC(avg_, 16)

#define put_cavs_qpel8_mc00_c  ff_put_pixels8x8_c
#define avg_cavs_qpel8_mc00_c  ff_avg_pixels8x8_c
#define put_cavs_qpel16_mc00_c ff_put_pixels16x16_c
#define avg_cavs_qpel16_mc00_c ff_avg_pixels16x16_c
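
/* Fill the CAVSDSPContext tables: _pixels_tab[0] holds the 16x16 functions,
 * _pixels_tab[1] the 8x8 ones, indexed by quarter-pel position x + 4*y. */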
av_cold void ff_cavsdsp_init(CAVSDSPContext* c, AVCodecContext *avctx) {
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
    dspfunc(put_cavs_qpel, 0, 16);
    dspfunc(put_cavs_qpel, 1, 8);
    dspfunc(avg_cavs_qpel, 0, 16);
    dspfunc(avg_cavs_qpel, 1, 8);
    c->cavs_filter_lv = cavs_filter_lv_c;
    c->cavs_filter_lh = cavs_filter_lh_c;
    c->cavs_filter_cv = cavs_filter_cv_c;
    c->cavs_filter_ch = cavs_filter_ch_c;
    c->cavs_idct8_add = cavs_idct8_add_c;
    c->idct_perm = FF_IDCT_PERM_NONE;

    if (ARCH_X86)
        ff_cavsdsp_init_x86(c, avctx);
}