libavcodec/x86/hpeldsp_init.c
/*
 * SIMD-optimized halfpel functions
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/hpeldsp.h"
#include "libavcodec/pixels.h"
#include "fpel.h"
#include "hpeldsp.h"
void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_avg_pixels8_3dnow(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
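
/* Note: the ff_*-prefixed prototypes above are implemented in the external
 * assembly sources in this directory (e.g. hpeldsp.asm); the plain-MMX
 * half-pel variants, by contrast, are generated from the inline-asm
 * templates included further down in this file. */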
#define avg_pixels8_mmx         ff_avg_pixels8_mmx
#define avg_pixels8_x2_mmx      ff_avg_pixels8_x2_mmx
#define avg_pixels16_mmx        ff_avg_pixels16_mmx
#define avg_pixels8_xy2_mmx     ff_avg_pixels8_xy2_mmx
#define avg_pixels16_xy2_mmx    ff_avg_pixels16_xy2_mmx
#define put_pixels8_mmx         ff_put_pixels8_mmx
#define put_pixels16_mmx        ff_put_pixels16_mmx
#define put_pixels8_xy2_mmx     ff_put_pixels8_xy2_mmx
#define put_pixels16_xy2_mmx    ff_put_pixels16_xy2_mmx
#define avg_no_rnd_pixels16_mmx ff_avg_pixels16_mmx
#define put_no_rnd_pixels8_mmx  ff_put_pixels8_mmx
#define put_no_rnd_pixels16_mmx ff_put_pixels16_mmx
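
/* These aliases let the generic SET_HPEL_FUNCS() macro below refer to the MMX
 * routines by their local (unprefixed) names; the implementations themselves
 * carry the usual ff_ prefix and come either from the shared full-pel
 * assembly (see fpel.h) or from the inline-asm templates included below.
 * The "put" no-rounding entries reuse the rounding versions directly: a
 * full-pel copy does no averaging, so the rounding mode cannot change the
 * result. */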

#if HAVE_INLINE_ASM

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX_NO_RND(a, b, c, e)
#define STATIC static

#include "rnd_template.c"
#include "hpeldsp_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef STATIC
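
/* CALL_2X_PIXELS(a, b, n) (from libavcodec/pixels.h) builds a 16-pixel-wide
 * operation from an 8-pixel-wide one by running it on the left and right
 * halves of the block.  Sketch of the expansion (see pixels.h for the
 * authoritative definition):
 *
 *     static void a(uint8_t *block, const uint8_t *pixels,
 *                   ptrdiff_t line_size, int h)
 *     {
 *         b(block,     pixels,     line_size, h);
 *         b(block + n, pixels + n, line_size, h);
 *     }
 */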
CALL_2X_PIXELS(avg_no_rnd_pixels16_y2_mmx, avg_no_rnd_pixels8_y2_mmx, 8)
CALL_2X_PIXELS(put_no_rnd_pixels16_y2_mmx, put_no_rnd_pixels8_y2_mmx, 8)

CALL_2X_PIXELS(avg_no_rnd_pixels16_xy2_mmx, avg_no_rnd_pixels8_xy2_mmx, 8)
CALL_2X_PIXELS(put_no_rnd_pixels16_xy2_mmx, put_no_rnd_pixels8_xy2_mmx, 8)

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#define DEF(x, y) ff_ ## x ## _ ## y ## _mmx
#define STATIC

#include "rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

CALL_2X_PIXELS(avg_pixels16_y2_mmx, avg_pixels8_y2_mmx, 8)
CALL_2X_PIXELS(put_pixels16_y2_mmx, put_pixels8_y2_mmx, 8)

CALL_2X_PIXELS_EXPORT(ff_avg_pixels16_xy2_mmx, ff_avg_pixels8_xy2_mmx, 8)
CALL_2X_PIXELS_EXPORT(ff_put_pixels16_xy2_mmx, ff_put_pixels8_xy2_mmx, 8)

#endif /* HAVE_INLINE_ASM */


#if HAVE_X86ASM
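
/* Build static 16-pixel-wide wrappers around the external 8-pixel-wide
 * routines declared at the top of this file, one set per CPU extension
 * suffix, by splitting each block into two 8-pixel halves with
 * CALL_2X_PIXELS. */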
#define HPELDSP_AVG_PIXELS16(CPUEXT)                                                      \
    CALL_2X_PIXELS(put_no_rnd_pixels16_x2 ## CPUEXT, ff_put_no_rnd_pixels8_x2 ## CPUEXT, 8) \
    CALL_2X_PIXELS(put_pixels16_y2        ## CPUEXT, ff_put_pixels8_y2        ## CPUEXT, 8) \
    CALL_2X_PIXELS(put_no_rnd_pixels16_y2 ## CPUEXT, ff_put_no_rnd_pixels8_y2 ## CPUEXT, 8) \
    CALL_2X_PIXELS(avg_pixels16           ## CPUEXT, ff_avg_pixels8           ## CPUEXT, 8) \
    CALL_2X_PIXELS(avg_pixels16_x2        ## CPUEXT, ff_avg_pixels8_x2        ## CPUEXT, 8) \
    CALL_2X_PIXELS(avg_pixels16_y2        ## CPUEXT, ff_avg_pixels8_y2        ## CPUEXT, 8) \
    CALL_2X_PIXELS(avg_pixels16_xy2       ## CPUEXT, ff_avg_pixels8_xy2       ## CPUEXT, 8)

HPELDSP_AVG_PIXELS16(_3dnow)
HPELDSP_AVG_PIXELS16(_mmxext)

#endif /* HAVE_X86ASM */

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)                                     \
    do {                                                                        \
        c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _     ## CPU; \
        c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
    } while (0)
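
/* For example, SET_HPEL_FUNCS(put, [0], 16, mmx) expands (inside the
 * do { } while (0) wrapper) to
 *
 *     c->put_pixels_tab[0][0] = put_pixels16_mmx;
 *     c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
 *     c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
 *     c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
 *
 * filling the four half-pel positions (full pel, x half-pel, y half-pel,
 * x+y half-pel) of one row of the function table. */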

static void hpeldsp_init_mmx(HpelDSPContext *c, int flags)
{
#if HAVE_MMX_INLINE
    SET_HPEL_FUNCS(put,        [0], 16, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
    SET_HPEL_FUNCS(avg,        [0], 16, mmx);
    SET_HPEL_FUNCS(avg_no_rnd,    , 16, mmx);
    SET_HPEL_FUNCS(put,        [1],  8, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [1],  8, mmx);
    SET_HPEL_FUNCS(avg,        [1],  8, mmx);
#endif /* HAVE_MMX_INLINE */
}

static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags)
{
#if HAVE_MMXEXT_EXTERNAL
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
    c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext;

    c->avg_pixels_tab[0][0] = avg_pixels16_mmxext;
    c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext;
    c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;

    if (!(flags & AV_CODEC_FLAG_BITEXACT)) {
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;

        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;
        c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}

static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags)
{
#if HAVE_AMD3DNOW_EXTERNAL
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
    c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

    c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
    c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
    c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;

    if (!(flags & AV_CODEC_FLAG_BITEXACT)) {
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;

        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
        c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
    }
#endif /* HAVE_AMD3DNOW_EXTERNAL */
}
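
/* Only the full-pel 16-pixel copy and average have SSE2 versions; they are
 * installed by ff_hpeldsp_init_x86() below only under EXTERNAL_SSE2_FAST,
 * i.e. (as far as the libavutil/x86/cpu.h helpers go) when SSE2 is present
 * and the CPU is not flagged as having slow SSE2. */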
static void hpeldsp_init_sse2_fast(HpelDSPContext *c, int flags)
{
#if HAVE_SSE2_EXTERNAL
    c->put_pixels_tab[0][0]        = ff_put_pixels16_sse2;
    c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
    c->avg_pixels_tab[0][0]        = ff_avg_pixels16_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}

av_cold void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags)
{
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_MMX(cpu_flags))
        hpeldsp_init_mmx(c, flags);

    if (EXTERNAL_AMD3DNOW(cpu_flags))
        hpeldsp_init_3dnow(c, flags);

    if (EXTERNAL_MMXEXT(cpu_flags))
        hpeldsp_init_mmxext(c, flags);

    if (EXTERNAL_SSE2_FAST(cpu_flags))
        hpeldsp_init_sse2_fast(c, flags);

    if (CONFIG_VP3_DECODER)
        ff_hpeldsp_vp3_init_x86(c, cpu_flags);
}
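
/* ff_hpeldsp_init_x86() is the x86 hook called from the generic
 * ff_hpeldsp_init() in libavcodec/hpeldsp.c after the C implementations have
 * been installed, so only the table entries assigned above override the
 * portable fallbacks. */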