/*
 * VC-1 and WMV3 decoder - DSP functions
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/vc1dsp.c
 * VC-1 and WMV3 decoder
 */
/** Apply overlap transform to horizontal edge
 *  (modifies the two rows above and the two rows below the edge at src)
 */
static void vc1_v_overlap_c(uint8_t* src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1; /* rounding alternates between columns */
    for(i = 0; i < 8; i++) {
        /* four pixels straddling the horizontal edge */
        a = src[-2*stride];
        b = src[-stride];
        c = src[0];
        d = src[stride];

        /* overlap-smoothing deltas */
        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2*stride] = a - d1;
        src[-stride]   = av_clip_uint8(b - d2);
        src[0]         = av_clip_uint8(c + d2);
        src[stride]    = d + d1;

        src++;
        rnd = !rnd;
    }
}
/** Apply overlap transform to vertical edge
 *  (modifies the two columns left and the two columns right of src)
 */
static void vc1_h_overlap_c(uint8_t* src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1; /* rounding alternates between rows */
    for(i = 0; i < 8; i++) {
        /* four pixels straddling the vertical edge */
        a = src[-2];
        b = src[-1];
        c = src[0];
        d = src[1];

        /* overlap-smoothing deltas */
        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2] = a - d1;
        src[-1] = av_clip_uint8(b - d2);
        src[0]  = av_clip_uint8(c + d2);
        src[1]  = d + d1;

        src += stride;
        rnd = !rnd;
    }
}
82 * VC-1 in-loop deblocking filter for one line
83 * @param src source block type
84 * @param stride block stride
85 * @param pq block quantizer
86 * @return whether other 3 pairs should be filtered or not
89 static av_always_inline
int vc1_filter_line(uint8_t* src
, int stride
, int pq
){
90 uint8_t *cm
= ff_cropTbl
+ MAX_NEG_CROP
;
92 int a0
= (2*(src
[-2*stride
] - src
[ 1*stride
]) - 5*(src
[-1*stride
] - src
[ 0*stride
]) + 4) >> 3;
93 int a0_sign
= a0
>> 31; /* Store sign */
94 a0
= (a0
^ a0_sign
) - a0_sign
; /* a0 = FFABS(a0); */
96 int a1
= FFABS((2*(src
[-4*stride
] - src
[-1*stride
]) - 5*(src
[-3*stride
] - src
[-2*stride
]) + 4) >> 3);
97 int a2
= FFABS((2*(src
[ 0*stride
] - src
[ 3*stride
]) - 5*(src
[ 1*stride
] - src
[ 2*stride
]) + 4) >> 3);
98 if(a1
< a0
|| a2
< a0
){
99 int clip
= src
[-1*stride
] - src
[ 0*stride
];
100 int clip_sign
= clip
>> 31;
101 clip
= ((clip
^ clip_sign
) - clip_sign
)>>1;
103 int a3
= FFMIN(a1
, a2
);
104 int d
= 5 * (a3
- a0
);
105 int d_sign
= (d
>> 31);
106 d
= ((d
^ d_sign
) - d_sign
) >> 3;
109 if( d_sign
^ clip_sign
)
113 d
= (d
^ d_sign
) - d_sign
; /* Restore sign */
114 src
[-1*stride
] = cm
[src
[-1*stride
] - d
];
115 src
[ 0*stride
] = cm
[src
[ 0*stride
] + d
];
/**
 * VC-1 in-loop deblocking filter
 * @param src source block type
 * @param step distance between horizontally adjacent elements
 * @param stride distance between vertically adjacent elements
 * @param len edge length to filter (4 or 8 pixels)
 * @param pq block quantizer
 */
static inline void vc1_loop_filter(uint8_t* src, int step, int stride, int len, int pq)
{
    int i;
    int filt3;

    for(i = 0; i < len; i += 4){
        /* The third line of each group of four decides whether the
           remaining three lines are filtered at all. */
        filt3 = vc1_filter_line(src + 2*step, stride, pq);
        if(filt3){
            vc1_filter_line(src + 0*step, stride, pq);
            vc1_filter_line(src + 1*step, stride, pq);
            vc1_filter_line(src + 3*step, stride, pq);
        }
        src += step * 4;
    }
}
/** In-loop deblocking across a horizontal boundary, 4 columns wide */
static void vc1_v_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 4, pq);
}
/** In-loop deblocking across a vertical boundary, 4 rows tall */
static void vc1_h_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 4, pq);
}
/** In-loop deblocking across a horizontal boundary, 8 columns wide */
static void vc1_v_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 8, pq);
}
/** In-loop deblocking across a vertical boundary, 8 rows tall */
static void vc1_h_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 8, pq);
}
/** In-loop deblocking across a horizontal boundary, 16 columns wide */
static void vc1_v_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 16, pq);
}
/** In-loop deblocking across a vertical boundary, 16 rows tall */
static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 16, pq);
}
179 /** Do inverse transform on 8x8 block
181 static void vc1_inv_trans_8x8_dc_c(uint8_t *dest
, int linesize
, DCTELEM
*block
)
185 const uint8_t *cm
= ff_cropTbl
+ MAX_NEG_CROP
;
186 dc
= (3 * dc
+ 1) >> 1;
187 dc
= (3 * dc
+ 16) >> 5;
188 for(i
= 0; i
< 8; i
++){
189 dest
[0] = cm
[dest
[0]+dc
];
190 dest
[1] = cm
[dest
[1]+dc
];
191 dest
[2] = cm
[dest
[2]+dc
];
192 dest
[3] = cm
[dest
[3]+dc
];
193 dest
[4] = cm
[dest
[4]+dc
];
194 dest
[5] = cm
[dest
[5]+dc
];
195 dest
[6] = cm
[dest
[6]+dc
];
196 dest
[7] = cm
[dest
[7]+dc
];
201 static void vc1_inv_trans_8x8_c(DCTELEM block
[64])
204 register int t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
;
209 for(i
= 0; i
< 8; i
++){
210 t1
= 12 * (src
[0] + src
[4]) + 4;
211 t2
= 12 * (src
[0] - src
[4]) + 4;
212 t3
= 16 * src
[2] + 6 * src
[6];
213 t4
= 6 * src
[2] - 16 * src
[6];
220 t1
= 16 * src
[1] + 15 * src
[3] + 9 * src
[5] + 4 * src
[7];
221 t2
= 15 * src
[1] - 4 * src
[3] - 16 * src
[5] - 9 * src
[7];
222 t3
= 9 * src
[1] - 16 * src
[3] + 4 * src
[5] + 15 * src
[7];
223 t4
= 4 * src
[1] - 9 * src
[3] + 15 * src
[5] - 16 * src
[7];
225 dst
[0] = (t5
+ t1
) >> 3;
226 dst
[1] = (t6
+ t2
) >> 3;
227 dst
[2] = (t7
+ t3
) >> 3;
228 dst
[3] = (t8
+ t4
) >> 3;
229 dst
[4] = (t8
- t4
) >> 3;
230 dst
[5] = (t7
- t3
) >> 3;
231 dst
[6] = (t6
- t2
) >> 3;
232 dst
[7] = (t5
- t1
) >> 3;
240 for(i
= 0; i
< 8; i
++){
241 t1
= 12 * (src
[ 0] + src
[32]) + 64;
242 t2
= 12 * (src
[ 0] - src
[32]) + 64;
243 t3
= 16 * src
[16] + 6 * src
[48];
244 t4
= 6 * src
[16] - 16 * src
[48];
251 t1
= 16 * src
[ 8] + 15 * src
[24] + 9 * src
[40] + 4 * src
[56];
252 t2
= 15 * src
[ 8] - 4 * src
[24] - 16 * src
[40] - 9 * src
[56];
253 t3
= 9 * src
[ 8] - 16 * src
[24] + 4 * src
[40] + 15 * src
[56];
254 t4
= 4 * src
[ 8] - 9 * src
[24] + 15 * src
[40] - 16 * src
[56];
256 dst
[ 0] = (t5
+ t1
) >> 7;
257 dst
[ 8] = (t6
+ t2
) >> 7;
258 dst
[16] = (t7
+ t3
) >> 7;
259 dst
[24] = (t8
+ t4
) >> 7;
260 dst
[32] = (t8
- t4
+ 1) >> 7;
261 dst
[40] = (t7
- t3
+ 1) >> 7;
262 dst
[48] = (t6
- t2
+ 1) >> 7;
263 dst
[56] = (t5
- t1
+ 1) >> 7;
270 /** Do inverse transform on 8x4 part of block
272 static void vc1_inv_trans_8x4_dc_c(uint8_t *dest
, int linesize
, DCTELEM
*block
)
276 const uint8_t *cm
= ff_cropTbl
+ MAX_NEG_CROP
;
277 dc
= ( 3 * dc
+ 1) >> 1;
278 dc
= (17 * dc
+ 64) >> 7;
279 for(i
= 0; i
< 4; i
++){
280 dest
[0] = cm
[dest
[0]+dc
];
281 dest
[1] = cm
[dest
[1]+dc
];
282 dest
[2] = cm
[dest
[2]+dc
];
283 dest
[3] = cm
[dest
[3]+dc
];
284 dest
[4] = cm
[dest
[4]+dc
];
285 dest
[5] = cm
[dest
[5]+dc
];
286 dest
[6] = cm
[dest
[6]+dc
];
287 dest
[7] = cm
[dest
[7]+dc
];
292 static void vc1_inv_trans_8x4_c(uint8_t *dest
, int linesize
, DCTELEM
*block
)
295 register int t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
;
297 const uint8_t *cm
= ff_cropTbl
+ MAX_NEG_CROP
;
301 for(i
= 0; i
< 4; i
++){
302 t1
= 12 * (src
[0] + src
[4]) + 4;
303 t2
= 12 * (src
[0] - src
[4]) + 4;
304 t3
= 16 * src
[2] + 6 * src
[6];
305 t4
= 6 * src
[2] - 16 * src
[6];
312 t1
= 16 * src
[1] + 15 * src
[3] + 9 * src
[5] + 4 * src
[7];
313 t2
= 15 * src
[1] - 4 * src
[3] - 16 * src
[5] - 9 * src
[7];
314 t3
= 9 * src
[1] - 16 * src
[3] + 4 * src
[5] + 15 * src
[7];
315 t4
= 4 * src
[1] - 9 * src
[3] + 15 * src
[5] - 16 * src
[7];
317 dst
[0] = (t5
+ t1
) >> 3;
318 dst
[1] = (t6
+ t2
) >> 3;
319 dst
[2] = (t7
+ t3
) >> 3;
320 dst
[3] = (t8
+ t4
) >> 3;
321 dst
[4] = (t8
- t4
) >> 3;
322 dst
[5] = (t7
- t3
) >> 3;
323 dst
[6] = (t6
- t2
) >> 3;
324 dst
[7] = (t5
- t1
) >> 3;
331 for(i
= 0; i
< 8; i
++){
332 t1
= 17 * (src
[ 0] + src
[16]) + 64;
333 t2
= 17 * (src
[ 0] - src
[16]) + 64;
334 t3
= 22 * src
[ 8] + 10 * src
[24];
335 t4
= 22 * src
[24] - 10 * src
[ 8];
337 dest
[0*linesize
] = cm
[dest
[0*linesize
] + ((t1
+ t3
) >> 7)];
338 dest
[1*linesize
] = cm
[dest
[1*linesize
] + ((t2
- t4
) >> 7)];
339 dest
[2*linesize
] = cm
[dest
[2*linesize
] + ((t2
+ t4
) >> 7)];
340 dest
[3*linesize
] = cm
[dest
[3*linesize
] + ((t1
- t3
) >> 7)];
347 /** Do inverse transform on 4x8 parts of block
349 static void vc1_inv_trans_4x8_dc_c(uint8_t *dest
, int linesize
, DCTELEM
*block
)
353 const uint8_t *cm
= ff_cropTbl
+ MAX_NEG_CROP
;
354 dc
= (17 * dc
+ 4) >> 3;
355 dc
= (12 * dc
+ 64) >> 7;
356 for(i
= 0; i
< 8; i
++){
357 dest
[0] = cm
[dest
[0]+dc
];
358 dest
[1] = cm
[dest
[1]+dc
];
359 dest
[2] = cm
[dest
[2]+dc
];
360 dest
[3] = cm
[dest
[3]+dc
];
365 static void vc1_inv_trans_4x8_c(uint8_t *dest
, int linesize
, DCTELEM
*block
)
368 register int t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
;
370 const uint8_t *cm
= ff_cropTbl
+ MAX_NEG_CROP
;
374 for(i
= 0; i
< 8; i
++){
375 t1
= 17 * (src
[0] + src
[2]) + 4;
376 t2
= 17 * (src
[0] - src
[2]) + 4;
377 t3
= 22 * src
[1] + 10 * src
[3];
378 t4
= 22 * src
[3] - 10 * src
[1];
380 dst
[0] = (t1
+ t3
) >> 3;
381 dst
[1] = (t2
- t4
) >> 3;
382 dst
[2] = (t2
+ t4
) >> 3;
383 dst
[3] = (t1
- t3
) >> 3;
390 for(i
= 0; i
< 4; i
++){
391 t1
= 12 * (src
[ 0] + src
[32]) + 64;
392 t2
= 12 * (src
[ 0] - src
[32]) + 64;
393 t3
= 16 * src
[16] + 6 * src
[48];
394 t4
= 6 * src
[16] - 16 * src
[48];
401 t1
= 16 * src
[ 8] + 15 * src
[24] + 9 * src
[40] + 4 * src
[56];
402 t2
= 15 * src
[ 8] - 4 * src
[24] - 16 * src
[40] - 9 * src
[56];
403 t3
= 9 * src
[ 8] - 16 * src
[24] + 4 * src
[40] + 15 * src
[56];
404 t4
= 4 * src
[ 8] - 9 * src
[24] + 15 * src
[40] - 16 * src
[56];
406 dest
[0*linesize
] = cm
[dest
[0*linesize
] + ((t5
+ t1
) >> 7)];
407 dest
[1*linesize
] = cm
[dest
[1*linesize
] + ((t6
+ t2
) >> 7)];
408 dest
[2*linesize
] = cm
[dest
[2*linesize
] + ((t7
+ t3
) >> 7)];
409 dest
[3*linesize
] = cm
[dest
[3*linesize
] + ((t8
+ t4
) >> 7)];
410 dest
[4*linesize
] = cm
[dest
[4*linesize
] + ((t8
- t4
+ 1) >> 7)];
411 dest
[5*linesize
] = cm
[dest
[5*linesize
] + ((t7
- t3
+ 1) >> 7)];
412 dest
[6*linesize
] = cm
[dest
[6*linesize
] + ((t6
- t2
+ 1) >> 7)];
413 dest
[7*linesize
] = cm
[dest
[7*linesize
] + ((t5
- t1
+ 1) >> 7)];
420 /** Do inverse transform on 4x4 part of block
422 static void vc1_inv_trans_4x4_dc_c(uint8_t *dest
, int linesize
, DCTELEM
*block
)
426 const uint8_t *cm
= ff_cropTbl
+ MAX_NEG_CROP
;
427 dc
= (17 * dc
+ 4) >> 3;
428 dc
= (17 * dc
+ 64) >> 7;
429 for(i
= 0; i
< 4; i
++){
430 dest
[0] = cm
[dest
[0]+dc
];
431 dest
[1] = cm
[dest
[1]+dc
];
432 dest
[2] = cm
[dest
[2]+dc
];
433 dest
[3] = cm
[dest
[3]+dc
];
438 static void vc1_inv_trans_4x4_c(uint8_t *dest
, int linesize
, DCTELEM
*block
)
441 register int t1
,t2
,t3
,t4
;
443 const uint8_t *cm
= ff_cropTbl
+ MAX_NEG_CROP
;
447 for(i
= 0; i
< 4; i
++){
448 t1
= 17 * (src
[0] + src
[2]) + 4;
449 t2
= 17 * (src
[0] - src
[2]) + 4;
450 t3
= 22 * src
[1] + 10 * src
[3];
451 t4
= 22 * src
[3] - 10 * src
[1];
453 dst
[0] = (t1
+ t3
) >> 3;
454 dst
[1] = (t2
- t4
) >> 3;
455 dst
[2] = (t2
+ t4
) >> 3;
456 dst
[3] = (t1
- t3
) >> 3;
463 for(i
= 0; i
< 4; i
++){
464 t1
= 17 * (src
[ 0] + src
[16]) + 64;
465 t2
= 17 * (src
[ 0] - src
[16]) + 64;
466 t3
= 22 * src
[ 8] + 10 * src
[24];
467 t4
= 22 * src
[24] - 10 * src
[ 8];
469 dest
[0*linesize
] = cm
[dest
[0*linesize
] + ((t1
+ t3
) >> 7)];
470 dest
[1*linesize
] = cm
[dest
[1*linesize
] + ((t2
- t4
) >> 7)];
471 dest
[2*linesize
] = cm
[dest
[2*linesize
] + ((t2
+ t4
) >> 7)];
472 dest
[3*linesize
] = cm
[dest
[3*linesize
] + ((t1
- t3
) >> 7)];
479 /* motion compensation functions */
480 /** Filter in case of 2 filters */
481 #define VC1_MSPEL_FILTER_16B(DIR, TYPE) \
482 static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, int stride, int mode) \
485 case 0: /* no shift - should not occur */ \
487 case 1: /* 1/4 shift */ \
488 return -4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2]; \
489 case 2: /* 1/2 shift */ \
490 return -src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2]; \
491 case 3: /* 3/4 shift */ \
492 return -3*src[-stride] + 18*src[0] + 53*src[stride] - 4*src[stride*2]; \
494 return 0; /* should not occur */ \
497 VC1_MSPEL_FILTER_16B(ver
, uint8_t);
498 VC1_MSPEL_FILTER_16B(hor
, int16_t);
501 /** Filter used to interpolate fractional pel values
503 static av_always_inline
int vc1_mspel_filter(const uint8_t *src
, int stride
, int mode
, int r
)
509 return (-4*src
[-stride
] + 53*src
[0] + 18*src
[stride
] - 3*src
[stride
*2] + 32 - r
) >> 6;
511 return (-src
[-stride
] + 9*src
[0] + 9*src
[stride
] - src
[stride
*2] + 8 - r
) >> 4;
513 return (-3*src
[-stride
] + 18*src
[0] + 53*src
[stride
] - 4*src
[stride
*2] + 32 - r
) >> 6;
515 return 0; //should not occur
/** Function used to do motion compensation with bicubic interpolation
 */
#define VC1_MSPEL_MC(OP, OPNAME)\
static void OPNAME ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride, int hmode, int vmode, int rnd)\
{\
    int     i, j;\
\
    if (vmode) { /* Horizontal filter to apply */\
        int r;\
\
        if (hmode) { /* Vertical filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int16_t          tmp[11*8], *tptr = tmp;\
\
            r = (1<<(shift-1)) + rnd-1;\
\
            src -= 1;\
            for(j = 0; j < 8; j++) {\
                for(i = 0; i < 11; i++)\
                    tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode)+r)>>shift;\
                src += stride;\
                tptr += 11;\
            }\
\
            r = 64-rnd;\
            tptr = tmp+1;\
            for(j = 0; j < 8; j++) {\
                for(i = 0; i < 8; i++)\
                    OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode)+r)>>7);\
                dst += stride;\
                tptr += 11;\
            }\
\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            r = 1-rnd;\
\
            for(j = 0; j < 8; j++) {\
                for(i = 0; i < 8; i++)\
                    OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r));\
                src += stride;\
                dst += stride;\
            }\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    for(j = 0; j < 8; j++) {\
        for(i = 0; i < 8; i++)\
            OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd));\
        dst += stride;\
        src += stride;\
    }\
}

#define op_put(a, b) a = av_clip_uint8(b)
#define op_avg(a, b) a = (a + av_clip_uint8(b) + 1) >> 1

VC1_MSPEL_MC(op_put, put_)
VC1_MSPEL_MC(op_avg, avg_)
/* pixel functions - really are entry points to vc1_mspel_mc */

/* this one is defined in dsputil.c */
void ff_put_vc1_mspel_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int rnd);
void ff_avg_vc1_mspel_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int rnd);

/** Generate one put and one avg wrapper per fractional-pel position
 *  (a = horizontal mode, b = vertical mode). */
#define PUT_VC1_MSPEL(a, b)\
static void put_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     put_vc1_mspel_mc(dst, src, stride, a, b,  rnd);                         \
}\
static void avg_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     avg_vc1_mspel_mc(dst, src, stride, a, b,  rnd);                         \
}

/* Instantiate all 15 fractional positions (mc00 lives in dsputil.c);
   these are the functions ff_vc1dsp_init() installs in the mspel tables. */
PUT_VC1_MSPEL(1, 0)
PUT_VC1_MSPEL(2, 0)
PUT_VC1_MSPEL(3, 0)

PUT_VC1_MSPEL(0, 1)
PUT_VC1_MSPEL(1, 1)
PUT_VC1_MSPEL(2, 1)
PUT_VC1_MSPEL(3, 1)

PUT_VC1_MSPEL(0, 2)
PUT_VC1_MSPEL(1, 2)
PUT_VC1_MSPEL(2, 2)
PUT_VC1_MSPEL(3, 2)

PUT_VC1_MSPEL(0, 3)
PUT_VC1_MSPEL(1, 3)
PUT_VC1_MSPEL(2, 3)
PUT_VC1_MSPEL(3, 3)
615 void ff_vc1dsp_init(DSPContext
* dsp
, AVCodecContext
*avctx
) {
616 dsp
->vc1_inv_trans_8x8
= vc1_inv_trans_8x8_c
;
617 dsp
->vc1_inv_trans_4x8
= vc1_inv_trans_4x8_c
;
618 dsp
->vc1_inv_trans_8x4
= vc1_inv_trans_8x4_c
;
619 dsp
->vc1_inv_trans_4x4
= vc1_inv_trans_4x4_c
;
620 dsp
->vc1_inv_trans_8x8_dc
= vc1_inv_trans_8x8_dc_c
;
621 dsp
->vc1_inv_trans_4x8_dc
= vc1_inv_trans_4x8_dc_c
;
622 dsp
->vc1_inv_trans_8x4_dc
= vc1_inv_trans_8x4_dc_c
;
623 dsp
->vc1_inv_trans_4x4_dc
= vc1_inv_trans_4x4_dc_c
;
624 dsp
->vc1_h_overlap
= vc1_h_overlap_c
;
625 dsp
->vc1_v_overlap
= vc1_v_overlap_c
;
626 dsp
->vc1_v_loop_filter4
= vc1_v_loop_filter4_c
;
627 dsp
->vc1_h_loop_filter4
= vc1_h_loop_filter4_c
;
628 dsp
->vc1_v_loop_filter8
= vc1_v_loop_filter8_c
;
629 dsp
->vc1_h_loop_filter8
= vc1_h_loop_filter8_c
;
630 dsp
->vc1_v_loop_filter16
= vc1_v_loop_filter16_c
;
631 dsp
->vc1_h_loop_filter16
= vc1_h_loop_filter16_c
;
633 dsp
->put_vc1_mspel_pixels_tab
[ 0] = ff_put_vc1_mspel_mc00_c
;
634 dsp
->put_vc1_mspel_pixels_tab
[ 1] = put_vc1_mspel_mc10_c
;
635 dsp
->put_vc1_mspel_pixels_tab
[ 2] = put_vc1_mspel_mc20_c
;
636 dsp
->put_vc1_mspel_pixels_tab
[ 3] = put_vc1_mspel_mc30_c
;
637 dsp
->put_vc1_mspel_pixels_tab
[ 4] = put_vc1_mspel_mc01_c
;
638 dsp
->put_vc1_mspel_pixels_tab
[ 5] = put_vc1_mspel_mc11_c
;
639 dsp
->put_vc1_mspel_pixels_tab
[ 6] = put_vc1_mspel_mc21_c
;
640 dsp
->put_vc1_mspel_pixels_tab
[ 7] = put_vc1_mspel_mc31_c
;
641 dsp
->put_vc1_mspel_pixels_tab
[ 8] = put_vc1_mspel_mc02_c
;
642 dsp
->put_vc1_mspel_pixels_tab
[ 9] = put_vc1_mspel_mc12_c
;
643 dsp
->put_vc1_mspel_pixels_tab
[10] = put_vc1_mspel_mc22_c
;
644 dsp
->put_vc1_mspel_pixels_tab
[11] = put_vc1_mspel_mc32_c
;
645 dsp
->put_vc1_mspel_pixels_tab
[12] = put_vc1_mspel_mc03_c
;
646 dsp
->put_vc1_mspel_pixels_tab
[13] = put_vc1_mspel_mc13_c
;
647 dsp
->put_vc1_mspel_pixels_tab
[14] = put_vc1_mspel_mc23_c
;
648 dsp
->put_vc1_mspel_pixels_tab
[15] = put_vc1_mspel_mc33_c
;
650 dsp
->avg_vc1_mspel_pixels_tab
[ 0] = ff_avg_vc1_mspel_mc00_c
;
651 dsp
->avg_vc1_mspel_pixels_tab
[ 1] = avg_vc1_mspel_mc10_c
;
652 dsp
->avg_vc1_mspel_pixels_tab
[ 2] = avg_vc1_mspel_mc20_c
;
653 dsp
->avg_vc1_mspel_pixels_tab
[ 3] = avg_vc1_mspel_mc30_c
;
654 dsp
->avg_vc1_mspel_pixels_tab
[ 4] = avg_vc1_mspel_mc01_c
;
655 dsp
->avg_vc1_mspel_pixels_tab
[ 5] = avg_vc1_mspel_mc11_c
;
656 dsp
->avg_vc1_mspel_pixels_tab
[ 6] = avg_vc1_mspel_mc21_c
;
657 dsp
->avg_vc1_mspel_pixels_tab
[ 7] = avg_vc1_mspel_mc31_c
;
658 dsp
->avg_vc1_mspel_pixels_tab
[ 8] = avg_vc1_mspel_mc02_c
;
659 dsp
->avg_vc1_mspel_pixels_tab
[ 9] = avg_vc1_mspel_mc12_c
;
660 dsp
->avg_vc1_mspel_pixels_tab
[10] = avg_vc1_mspel_mc22_c
;
661 dsp
->avg_vc1_mspel_pixels_tab
[11] = avg_vc1_mspel_mc32_c
;
662 dsp
->avg_vc1_mspel_pixels_tab
[12] = avg_vc1_mspel_mc03_c
;
663 dsp
->avg_vc1_mspel_pixels_tab
[13] = avg_vc1_mspel_mc13_c
;
664 dsp
->avg_vc1_mspel_pixels_tab
[14] = avg_vc1_mspel_mc23_c
;
665 dsp
->avg_vc1_mspel_pixels_tab
[15] = avg_vc1_mspel_mc33_c
;