Merge "Skip computation of distortion in vp8_pick_inter_mode if active_map is used"
[libvpx.git] / vp8/encoder/generic/csystemdependent.c
blob a14843a8095415923601da7edfbbbd0d919b9959
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_ports/config.h"
#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"

void vp8_arch_x86_encoder_init(VP8_COMP *cpi);
void vp8_arch_arm_encoder_init(VP8_COMP *cpi);

void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);

void vp8_cmachine_specific_config(VP8_COMP *cpi)
{
#if CONFIG_RUNTIME_CPU_DETECT
    cpi->rtcd.common = &cpi->common.rtcd;
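    /* Default to the plain C SAD (sum of absolute differences) kernels used by
       the motion search; optimized versions may replace these further below. */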
    cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
    cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
    cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
    cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
    cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;

    cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
    cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
    cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c;
    cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_c;
    cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_c;

    cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_c;
    cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_c;
    cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_c;
    cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_c;
    cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_c;

    cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_c;
    cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_c;
    cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_c;
    cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_c;
    cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_c;
#if ARCH_X86 || ARCH_X86_64
    cpi->rtcd.variance.copy32xn = vp8_copy32xn_c;
#endif
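    /* Block variance kernels used for mode decision and rate-distortion costing. */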
    cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
    cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
    cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
    cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;
    cpi->rtcd.variance.var16x16 = vp8_variance16x16_c;
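
    /* Sub-pixel and half-pixel variance/MSE, used when refining motion vectors
       to fractional-pel precision. */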
    cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
    cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
    cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
    cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
    cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
    cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
    cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
    cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
    cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;

    cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
    cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;

    cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;
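
    /* Forward DCT and Walsh-Hadamard transforms; the "fast" entries fall back
       to the same C routines here. */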
    cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
    cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
    cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
    cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
    cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
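
    /* Macroblock encoding helpers: block error metrics and residual
       (subtraction) routines for luma and chroma. */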
    cpi->rtcd.encodemb.berr = vp8_block_error_c;
    cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
    cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;
    cpi->rtcd.encodemb.subb = vp8_subtract_b_c;
    cpi->rtcd.encodemb.submby = vp8_subtract_mby_c;
    cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_c;
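
    /* Quantizers: regular and fast paths, plus the pair variants that process
       two blocks per call. */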
    cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;
    cpi->rtcd.quantize.quantb_pair = vp8_regular_quantize_b_pair;
    cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_c;
    cpi->rtcd.quantize.fastquantb_pair = vp8_fast_quantize_b_pair_c;
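
    /* Motion search kernels (full, refining and diamond search), all SAD based. */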
    cpi->rtcd.search.full_search = vp8_full_search_sad;
    cpi->rtcd.search.refining_search = vp8_refining_search_sad;
    cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
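
    /* Temporal filter used when constructing alternate reference frames;
       not compiled in realtime-only builds. */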
#if !(CONFIG_REALTIME_ONLY)
    cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
#endif
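    /* SSIM statistics are only gathered when internal stats are enabled. */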
#if CONFIG_INTERNAL_STATS
    cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
    cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
#endif
#endif

    // Pure C:
    vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
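
    /* Architecture-specific initializers may overwrite the C defaults above
       with optimized implementations selected at run time. */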
#if ARCH_X86 || ARCH_X86_64
    vp8_arch_x86_encoder_init(cpi);
#endif

#if ARCH_ARM
    vp8_arch_arm_encoder_init(cpi);
#endif
}