/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_ports/config.h"
#include "vpx_ports/arm.h"
#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"

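/* Partial frame copy routines; the function pointer declared below is
 * retargeted to the NEON version at the end of this function when NEON is
 * detected. */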
extern void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vpxyv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);

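/* Fill in the encoder's run-time CPU detect (RTCD) function tables with
 * the fastest implementations available for the ARM CPU detected at
 * runtime. */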
void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
{
#if CONFIG_RUNTIME_CPU_DETECT
    int flags = cpi->common.rtcd.flags;
    int has_edsp = flags & HAS_EDSP;
    int has_media = flags & HAS_MEDIA;
    int has_neon = flags & HAS_NEON;

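/* ARMv6 media-extension implementations. Entries left commented out have
 * no ARMv6 version and keep the default C implementations. */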
#if HAVE_ARMV6
    if (has_media)
    {
        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_armv6;
        /*cpi->rtcd.variance.sad16x8              = vp8_sad16x8_c;
        cpi->rtcd.variance.sad8x16               = vp8_sad8x16_c;
        cpi->rtcd.variance.sad8x8                = vp8_sad8x8_c;
        cpi->rtcd.variance.sad4x4                = vp8_sad4x4_c;*/

        /*cpi->rtcd.variance.var4x4               = vp8_variance4x4_c;*/
        cpi->rtcd.variance.var8x8                = vp8_variance8x8_armv6;
        /*cpi->rtcd.variance.var8x16              = vp8_variance8x16_c;
        cpi->rtcd.variance.var16x8               = vp8_variance16x8_c;*/
        cpi->rtcd.variance.var16x16              = vp8_variance16x16_armv6;

        /*cpi->rtcd.variance.subpixvar4x4         = vp8_sub_pixel_variance4x4_c;*/
        cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_armv6;
        /*cpi->rtcd.variance.subpixvar8x16        = vp8_sub_pixel_variance8x16_c;
        cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;*/
        cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_armv6;
        cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_armv6;
        cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_armv6;
        cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_armv6;

        cpi->rtcd.variance.mse16x16              = vp8_mse16x16_armv6;
        /*cpi->rtcd.variance.getmbss              = vp8_get_mb_ss_c;*/

        /*cpi->rtcd.variance.get16x16prederror    = vp8_get16x16pred_error_c;
        cpi->rtcd.variance.get8x8var             = vp8_get8x8var_c;
        cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;
        cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c;*/

        /*cpi->rtcd.fdct.short4x4                 = vp8_short_fdct4x4_c;
        cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_c;*/
        cpi->rtcd.fdct.fast4x4                   = vp8_fast_fdct4x4_armv6;
        cpi->rtcd.fdct.fast8x4                   = vp8_fast_fdct8x4_armv6;
        cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_armv6;

        /*cpi->rtcd.encodemb.berr                 = vp8_block_error_c;
        cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c;
        cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c;*/
        cpi->rtcd.encodemb.subb                  = vp8_subtract_b_armv6;
        cpi->rtcd.encodemb.submby                = vp8_subtract_mby_armv6;
        cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_armv6;

        /*cpi->rtcd.quantize.quantb               = vp8_regular_quantize_b;*/
        cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_armv6;
    }
#endif

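/* NEON implementations. This block runs after the ARMv6 block, so on CPUs
 * that report both feature flags the NEON versions take precedence. */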
#if HAVE_ARMV7
    if (has_neon)
    {
        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_neon;
        cpi->rtcd.variance.sad16x8               = vp8_sad16x8_neon;
        cpi->rtcd.variance.sad8x16               = vp8_sad8x16_neon;
        cpi->rtcd.variance.sad8x8                = vp8_sad8x8_neon;
        cpi->rtcd.variance.sad4x4                = vp8_sad4x4_neon;

        /*cpi->rtcd.variance.var4x4               = vp8_variance4x4_c;*/
        cpi->rtcd.variance.var8x8                = vp8_variance8x8_neon;
        cpi->rtcd.variance.var8x16               = vp8_variance8x16_neon;
        cpi->rtcd.variance.var16x8               = vp8_variance16x8_neon;
        cpi->rtcd.variance.var16x16              = vp8_variance16x16_neon;

        /*cpi->rtcd.variance.subpixvar4x4         = vp8_sub_pixel_variance4x4_c;*/
        cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_neon;
        /*cpi->rtcd.variance.subpixvar8x16        = vp8_sub_pixel_variance8x16_c;
        cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;*/
        cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_neon;
        cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_neon;
        cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_neon;
        cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_neon;

        cpi->rtcd.variance.mse16x16              = vp8_mse16x16_neon;
        /*cpi->rtcd.variance.getmbss              = vp8_get_mb_ss_c;*/

        cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_neon;
        /*cpi->rtcd.variance.get8x8var            = vp8_get8x8var_c;
        cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;*/
        cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_neon;

        cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_neon;
        cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_neon;
        cpi->rtcd.fdct.fast4x4                   = vp8_fast_fdct4x4_neon;
        cpi->rtcd.fdct.fast8x4                   = vp8_fast_fdct8x4_neon;
        cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_neon;

        /*cpi->rtcd.encodemb.berr                 = vp8_block_error_c;
        cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c;
        cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c;*/
        cpi->rtcd.encodemb.subb                  = vp8_subtract_b_neon;
        cpi->rtcd.encodemb.submby                = vp8_subtract_mby_neon;
        cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_neon;
        /*cpi->rtcd.quantize.quantb               = vp8_regular_quantize_b;
        cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_c;*/
        /* The neon quantizer has not been updated to match the new exact
         * quantizer introduced in commit e04e2935
         */
        /*cpi->rtcd.quantize.fastquantb           = vp8_fast_quantize_b_neon;*/
    }
#endif
#endif

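/* Retarget the partial frame copy helper to its NEON implementation when
 * NEON is available. */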
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
    if (has_neon)
#endif
    {
        vp8_yv12_copy_partial_frame_ptr = vpxyv12_copy_partial_frame_neon;
    }
#endif
}