;******************************************************************************
;* Copyright Nick Kurshev
;* Copyright Michael (michaelni@gmx.at)
;* Copyright 2018 Jokyo Images
;* Copyright Ivo van Poorten
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; pshufb patterns: the byte order within each 4-byte pixel, repeated for every
; 16-byte lane (VBROADCASTI128 replicates the pattern to wider registers)
pb_shuffle2103: db 2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15
pb_shuffle0321: db 0, 3, 2, 1, 4, 7, 6, 5, 8, 11, 10, 9, 12, 15, 14, 13
pb_shuffle1230: db 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
pb_shuffle3012: db 3, 0, 1, 2, 7, 4, 5, 6, 11, 8, 9, 10, 15, 12, 13, 14
pb_shuffle3210: db 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
pb_shuffle3102: db 3, 1, 0, 2, 7, 5, 4, 6, 11, 9, 8, 10, 15, 13, 12, 14
pb_shuffle2013: db 2, 0, 1, 3, 6, 4, 5, 7, 10, 8, 9, 11, 14, 12, 13, 15
pb_shuffle2130: db 2, 1, 3, 0, 6, 5, 7, 4, 10, 9, 11, 8, 14, 13, 15, 12
pb_shuffle1203: db 1, 2, 0, 3, 5, 6, 4, 7, 9, 10, 8, 11, 13, 14, 12, 15

SECTION .text

%macro RSHIFT_COPY 5
; %1 dst ; %2 src ; %3 second src (ymm path only) ; %4 shift in bytes ; %5 vperm2i128 lane selector
%if mmsize == 32
    vperm2i128 %1, %2, %3, %5
    RSHIFT     %1, %4
%elif cpuflag(avx)
    psrldq     %1, %2, %4
%else
    mova       %1, %2
    RSHIFT     %1, %4
%endif
%endmacro
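
; Note: RSHIFT (from x86util) is a byte-wise right shift (psrldq for SSE-sized
; and larger registers); the non-AVX path is just a copy plus that shift. Since
; psrldq shifts each 128-bit lane of a ymm register independently, the
; mmsize == 32 path first combines the wanted lanes of the two sources with
; vperm2i128. For example (sketch, mmsize == 32):
;     RSHIFT_COPY m6, m2, m4, 1, 0x20
; expands to:
;     vperm2i128 m6, m2, m4, 0x20
;     psrldq     m6, 1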

;------------------------------------------------------------------------------
; shuffle_bytes_## (const uint8_t *src, uint8_t *dst, int src_size)
;------------------------------------------------------------------------------
; %1-4 index shuffle
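; For reference, the behaviour this macro family implements is, per 4-byte
; pixel (plain-C sketch, not part of the build; %1..%4 are the byte order):
;
;     for (int i = 0; i < src_size; i += 4) {
;         dst[i + 0] = src[i + %1];
;         dst[i + 1] = src[i + %2];
;         dst[i + 2] = src[i + %3];
;         dst[i + 3] = src[i + %4];
;     }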
%macro SHUFFLE_BYTES 4
cglobal shuffle_bytes_%1%2%3%4, 3, 5, 2, src, dst, w, tmp, x
    VBROADCASTI128 m0, [pb_shuffle%1%2%3%4]
    movsxdifnidn wq, wd
    mov xq, wq

    add srcq, wq
    add dstq, wq
    neg wq

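; The AVX-512 branch below handles the partial-vector head with a k-mask instead
; of a scalar loop: x = number of leftover pixels (assuming src_size is a whole
; number of 4-byte pixels), tmpd = (1 << x) - 1 selects that many dwords, and the
; masked load/shuffle/store processes them in one shot before the full-vector
; loop runs. E.g. (sketch) with src_size = 100 and mmsize = 64:
; x = (100 & 60) >> 2 = 9 leftover pixels, mask = 0x1ff.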
%if mmsize == 64
    and  xq, mmsize - 4
    shr  xq, 2
    mov  tmpd, -1
    shlx tmpd, tmpd, xd
    not  tmpd
    kmovw k7, tmpd
    vmovdqu32 m1{k7}{z}, [srcq + wq]
    pshufb    m1, m0
    vmovdqu32 [dstq + wq]{k7}, m1
    lea wq, [wq + 4 * xq]
%else
; calc scalar loop count
    and xq, mmsize - 4
    je .loop_simd

.loop_scalar:
    mov tmpb, [srcq + wq + %1]
    mov [dstq + wq + 0], tmpb
    mov tmpb, [srcq + wq + %2]
    mov [dstq + wq + 1], tmpb
    mov tmpb, [srcq + wq + %3]
    mov [dstq + wq + 2], tmpb
    mov tmpb, [srcq + wq + %4]
    mov [dstq + wq + 3], tmpb
    add wq, 4
    sub xq, 4
    jg .loop_scalar
%endif

; check if src_size < mmsize
    cmp wq, 0
    jge .end

.loop_simd:
    movu m1, [srcq + wq]
    pshufb m1, m0
    movu [dstq + wq], m1
    add wq, mmsize
    jl .loop_simd

.end:
    RET
%endmacro
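
; Each SHUFFLE_BYTES line below emits one function for the active INIT_* cpu
; flag, named by x86inc's cglobal as ff_shuffle_bytes_<order>_<cpuflag>
; (e.g. ff_shuffle_bytes_2103_ssse3).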
INIT_XMM ssse3
SHUFFLE_BYTES 2, 1, 0, 3
SHUFFLE_BYTES 0, 3, 2, 1
SHUFFLE_BYTES 1, 2, 3, 0
SHUFFLE_BYTES 3, 0, 1, 2
SHUFFLE_BYTES 3, 2, 1, 0
SHUFFLE_BYTES 3, 1, 0, 2
SHUFFLE_BYTES 2, 0, 1, 3
SHUFFLE_BYTES 2, 1, 3, 0
SHUFFLE_BYTES 1, 2, 0, 3

%if ARCH_X86_64
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
SHUFFLE_BYTES 2, 1, 0, 3
SHUFFLE_BYTES 0, 3, 2, 1
SHUFFLE_BYTES 1, 2, 3, 0
SHUFFLE_BYTES 3, 0, 1, 2
SHUFFLE_BYTES 3, 2, 1, 0
SHUFFLE_BYTES 3, 1, 0, 2
SHUFFLE_BYTES 2, 0, 1, 3
SHUFFLE_BYTES 2, 1, 3, 0
SHUFFLE_BYTES 1, 2, 0, 3
%endif
%endif

%if ARCH_X86_64
%if HAVE_AVX512ICL_EXTERNAL
INIT_ZMM avx512icl
SHUFFLE_BYTES 2, 1, 0, 3
SHUFFLE_BYTES 0, 3, 2, 1
SHUFFLE_BYTES 1, 2, 3, 0
SHUFFLE_BYTES 3, 0, 1, 2
SHUFFLE_BYTES 3, 2, 1, 0
SHUFFLE_BYTES 3, 1, 0, 2
SHUFFLE_BYTES 2, 0, 1, 3
SHUFFLE_BYTES 2, 1, 3, 0
SHUFFLE_BYTES 1, 2, 0, 3
%endif
%endif

;-----------------------------------------------------------------------------------------------
; uyvytoyuv422(uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
;              const uint8_t *src, int width, int height,
;              int lumStride, int chromStride, int srcStride)
;-----------------------------------------------------------------------------------------------
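; Reference behaviour (plain-C sketch, not part of the build): split packed
; UYVY into a planar Y plane plus horizontally subsampled U and V, per line:
;
;     for (int y = 0; y < height; y++) {
;         for (int x = 0; x < width / 2; x++) {
;             udst[x]         = src[4 * x + 0];
;             ydst[2 * x]     = src[4 * x + 1];
;             vdst[x]         = src[4 * x + 2];
;             ydst[2 * x + 1] = src[4 * x + 3];
;         }
;         ydst += lumStride;
;         udst += chromStride;
;         vdst += chromStride;
;         src  += srcStride;
;     }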
%macro UYVY_TO_YUV422 0
cglobal uyvytoyuv422, 9, 14, 8, ydst, udst, vdst, src, w, h, lum_stride, chrom_stride, src_stride, wtwo, whalf, tmp, x, back_w
    pxor m0, m0
    pcmpeqw m1, m1
    psrlw m1, 8 ; m1 = 0x00ff in every word: keeps the low byte of each byte pair

    movsxdifnidn wq, wd
    movsxdifnidn lum_strideq, lum_strided
    movsxdifnidn chrom_strideq, chrom_strided
    movsxdifnidn src_strideq, src_strided

    mov back_wq, wq
    mov whalfq, wq
    shr whalfq, 1 ; whalf = width / 2

    lea srcq, [srcq + wq * 2]
    add ydstq, wq
    add udstq, whalfq
    add vdstq, whalfq

.loop_line:
    mov xq, wq
    mov wtwoq, wq
    add wtwoq, wtwoq ; wtwo = width * 2

    neg wq
    neg wtwoq
    neg whalfq

; calc scalar loop count
    and xq, mmsize * 2 - 1
    je .loop_simd

.loop_scalar:
    mov tmpb, [srcq + wtwoq + 0]
    mov [udstq + whalfq], tmpb

    mov tmpb, [srcq + wtwoq + 1]
    mov [ydstq + wq], tmpb

    mov tmpb, [srcq + wtwoq + 2]
    mov [vdstq + whalfq], tmpb

    mov tmpb, [srcq + wtwoq + 3]
    mov [ydstq + wq + 1], tmpb

    add wq, 2
    add wtwoq, 4
    add whalfq, 1
    sub xq, 2
    jg .loop_scalar

; check if the simd loop is needed
    cmp wq, 0
    jge .end_line

.loop_simd:
%if mmsize == 32
    movu xm2, [srcq + wtwoq         ]
    movu xm3, [srcq + wtwoq + 16    ]
    movu xm4, [srcq + wtwoq + 16 * 2]
    movu xm5, [srcq + wtwoq + 16 * 3]
    vinserti128 m2, m2, [srcq + wtwoq + 16 * 4], 1
    vinserti128 m3, m3, [srcq + wtwoq + 16 * 5], 1
    vinserti128 m4, m4, [srcq + wtwoq + 16 * 6], 1
    vinserti128 m5, m5, [srcq + wtwoq + 16 * 7], 1
%else
    movu m2, [srcq + wtwoq             ]
    movu m3, [srcq + wtwoq + mmsize    ]
    movu m4, [srcq + wtwoq + mmsize * 2]
    movu m5, [srcq + wtwoq + mmsize * 3]
%endif
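
; At this point m2..m5 hold 4 * mmsize bytes of packed UYVY. In the ymm case the
; chunk is split across lanes: m2 = {bytes 0-15 | 64-79}, m3 = {16-31 | 80-95},
; m4 = {32-47 | 96-111}, m5 = {48-63 | 112-127}, so the lane-local byte shifts
; and packuswb steps below still produce contiguous Y/U/V output.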

; extract y part 1
    RSHIFT_COPY m6, m2, m4, 1, 0x20 ; UYVY UYVY -> YVYU YVY...
    pand m6, m1 ; YxYx YxYx...

    RSHIFT_COPY m7, m3, m5, 1, 0x20 ; UYVY UYVY -> YVYU YVY...
    pand m7, m1 ; YxYx YxYx...

    packuswb m6, m7 ; YYYY YYYY...
    movu [ydstq + wq], m6

; extract y part 2
    RSHIFT_COPY m6, m4, m2, 1, 0x13 ; UYVY UYVY -> YVYU YVY...
    pand m6, m1 ; YxYx YxYx...

    RSHIFT_COPY m7, m5, m3, 1, 0x13 ; UYVY UYVY -> YVYU YVY...
    pand m7, m1 ; YxYx YxYx...

    packuswb m6, m7 ; YYYY YYYY...
    movu [ydstq + wq + mmsize], m6

; extract uv
    pand m2, m1 ; UxVx...
    pand m3, m1 ; UxVx...
    pand m4, m1 ; UxVx...
    pand m5, m1 ; UxVx...

    packuswb m2, m3 ; UVUV...
    packuswb m4, m5 ; UVUV...

; extract u
    pand m6, m2, m1 ; UxUx...
    pand m7, m4, m1 ; UxUx...

    packuswb m6, m7 ; UUUU
    movu [udstq + whalfq], m6

; extract v
    psrlw m2, 8 ; VxVx...
    psrlw m4, 8 ; VxVx...
    packuswb m2, m4 ; VVVV
    movu [vdstq + whalfq], m2

    add whalfq, mmsize
    add wtwoq, mmsize * 4
    add wq, mmsize * 2
    jl .loop_simd

.end_line:
    add srcq, src_strideq
    add ydstq, lum_strideq
    add udstq, chrom_strideq
    add vdstq, chrom_strideq

; restore initial state of line variables
    mov wq, back_wq
    mov xq, wq
    mov whalfq, wq
    shr whalfq, 1 ; whalf = width / 2
    sub hd, 1
    jg .loop_line

    RET
%endmacro
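
; The instantiations below produce ff_uyvytoyuv422_sse2, ff_uyvytoyuv422_avx
; and (when available) ff_uyvytoyuv422_avx2, following x86inc's usual naming.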
%if ARCH_X86_64
INIT_XMM sse2
UYVY_TO_YUV422

INIT_XMM avx
UYVY_TO_YUV422
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
UYVY_TO_YUV422
%endif
%endif