/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_ports/config.h"
#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"

void vp8_arch_x86_encoder_init(VP8_COMP *cpi);
void vp8_arch_arm_encoder_init(VP8_COMP *cpi);

extern void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d);
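
/*
 * Function pointer used to copy a fraction of one YV12 frame into another.
 * It is set to the plain C routine below; platform-specific encoder init
 * code may install an optimized replacement.
 */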
void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);

void vp8_cmachine_specific_config(VP8_COMP *cpi)
{
#if CONFIG_RUNTIME_CPU_DETECT
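    /*
     * Fill in the run-time CPU detect (RTCD) tables with the portable C
     * implementations.  The architecture-specific init calls at the end of
     * this function may overwrite individual entries with optimized versions.
     */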
    cpi->rtcd.common = &cpi->common.rtcd;

    cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
    cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
    cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
    cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
    cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;

    cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
    cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
    cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c;
    cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_c;
    cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_c;

    cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_c;
    cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_c;
    cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_c;
    cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_c;
    cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_c;

    cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_c;
    cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_c;
    cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_c;
    cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_c;
    cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_c;

    cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
    cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
    cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
    cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;
    cpi->rtcd.variance.var16x16 = vp8_variance16x16_c;

    cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
    cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
    cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
    cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
    cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
    cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
    cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
    cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
    cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;

    cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
    cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;

    cpi->rtcd.variance.get16x16prederror = vp8_get16x16pred_error_c;
    cpi->rtcd.variance.get8x8var = vp8_get8x8var_c;
    cpi->rtcd.variance.get16x16var = vp8_get16x16var_c;
    cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;
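
    /*
     * Forward transform hooks.  The fast4x4/fast8x4 entries default to the
     * same C transforms as the regular ones; architecture-specific init may
     * point them at different implementations.
     */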
    cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
    cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
    cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
    cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
    cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;

    cpi->rtcd.encodemb.berr = vp8_block_error_c;
    cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
    cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;
    cpi->rtcd.encodemb.subb = vp8_subtract_b_c;
    cpi->rtcd.encodemb.submby = vp8_subtract_mby_c;
    cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_c;

    cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;
    cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_c;

    cpi->rtcd.search.full_search = vp8_full_search_sad;
    cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
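
    /* The temporal filter is only built when real-time-only mode is disabled. */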
#if !(CONFIG_REALTIME_ONLY)
    cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
#endif
#endif

    // Pure C:
    vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;

#if CONFIG_INTERNAL_STATS
#if CONFIG_RUNTIME_CPU_DETECT
    cpi->rtcd.variance.ssimpf_8x8 = ssim_parms_8x8_c;
    cpi->rtcd.variance.ssimpf = ssim_parms_c;
#endif
#endif
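
    /*
     * Hand off to the architecture-specific init routines, which replace the
     * C defaults above with optimized implementations where available.
     */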
#if ARCH_X86 || ARCH_X86_64
    vp8_arch_x86_encoder_init(cpi);
#endif

#if ARCH_ARM
    vp8_arch_arm_encoder_init(cpi);
#endif

}