; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512VL
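; Each function below inserts constant all-ones (-1) scalar elements into a
; vector; the checks track the lowering chosen at each SSE/AVX feature level.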

define <2 x i64> @insert_v2i64_x1(<2 x i64> %a) {
; SSE2-LABEL: insert_v2i64_x1:
; SSE2:       # BB#0:
; SSE2-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v2i64_x1:
; SSE3:       # BB#0:
; SSE3-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v2i64_x1:
; SSSE3:       # BB#0:
; SSSE3-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v2i64_x1:
; SSE41:       # BB#0:
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: insert_v2i64_x1:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: insert_v2i64_x1:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT:    retq
;
; AVX512-LABEL: insert_v2i64_x1:
; AVX512:       # BB#0:
; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX512-NEXT:    retq
  %1 = insertelement <2 x i64> %a, i64 -1, i32 0
  ret <2 x i64> %1
}

define <4 x i64> @insert_v4i64_01x3(<4 x i64> %a) {
; SSE2-LABEL: insert_v4i64_01x3:
; SSE2:       # BB#0:
; SSE2-NEXT:    movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v4i64_01x3:
; SSE3:       # BB#0:
; SSE3-NEXT:    movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v4i64_01x3:
; SSSE3:       # BB#0:
; SSSE3-NEXT:    movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v4i64_01x3:
; SSE41:       # BB#0:
; SSE41-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: insert_v4i64_01x3:
; AVX1:       # BB#0:
; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: insert_v4i64_01x3:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    retq
;
; AVX512-LABEL: insert_v4i64_01x3:
; AVX512:       # BB#0:
; AVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX512-NEXT:    retq
  %1 = insertelement <4 x i64> %a, i64 -1, i32 2
  ret <4 x i64> %1
}

define <4 x i32> @insert_v4i32_01x3(<4 x i32> %a) {
; SSE2-LABEL: insert_v4i32_01x3:
; SSE2:       # BB#0:
; SSE2-NEXT:    movl $-1, %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v4i32_01x3:
; SSE3:       # BB#0:
; SSE3-NEXT:    movl $-1, %eax
; SSE3-NEXT:    movd %eax, %xmm1
; SSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v4i32_01x3:
; SSSE3:       # BB#0:
; SSSE3-NEXT:    movl $-1, %eax
; SSSE3-NEXT:    movd %eax, %xmm1
; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v4i32_01x3:
; SSE41:       # BB#0:
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: insert_v4i32_01x3:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: insert_v4i32_01x3:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX2-NEXT:    retq
;
; AVX512-LABEL: insert_v4i32_01x3:
; AVX512:       # BB#0:
; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX512-NEXT:    retq
  %1 = insertelement <4 x i32> %a, i32 -1, i32 2
  ret <4 x i32> %1
}

define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
; SSE2-LABEL: insert_v8i32_x12345x7:
; SSE2:       # BB#0:
; SSE2-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT:    movl $-1, %eax
; SSE2-NEXT:    movd %eax, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v8i32_x12345x7:
; SSE3:       # BB#0:
; SSE3-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE3-NEXT:    movl $-1, %eax
; SSE3-NEXT:    movd %eax, %xmm2
; SSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v8i32_x12345x7:
; SSSE3:       # BB#0:
; SSSE3-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSSE3-NEXT:    movl $-1, %eax
; SSSE3-NEXT:    movd %eax, %xmm2
; SSSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v8i32_x12345x7:
; SSE41:       # BB#0:
; SSE41-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: insert_v8i32_x12345x7:
; AVX1:       # BB#0:
; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: insert_v8i32_x12345x7:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX2-NEXT:    retq
;
; AVX512-LABEL: insert_v8i32_x12345x7:
; AVX512:       # BB#0:
; AVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX512-NEXT:    retq
  %1 = insertelement <8 x i32> %a, i32 -1, i32 0
  %2 = insertelement <8 x i32> %1, i32 -1, i32 6
  ret <8 x i32> %2
}

define <8 x i16> @insert_v8i16_x12345x7(<8 x i16> %a) {
; SSE2-LABEL: insert_v8i16_x12345x7:
; SSE2:       # BB#0:
; SSE2-NEXT:    movl $65535, %eax # imm = 0xFFFF
; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v8i16_x12345x7:
; SSE3:       # BB#0:
; SSE3-NEXT:    movl $65535, %eax # imm = 0xFFFF
; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v8i16_x12345x7:
; SSSE3:       # BB#0:
; SSSE3-NEXT:    movl $65535, %eax # imm = 0xFFFF
; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v8i16_x12345x7:
; SSE41:       # BB#0:
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v8i16_x12345x7:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; AVX-NEXT:    retq
  %1 = insertelement <8 x i16> %a, i16 -1, i32 0
  %2 = insertelement <8 x i16> %1, i16 -1, i32 6
  ret <8 x i16> %2
}

define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
; SSE2-LABEL: insert_v16i16_x12345x789ABCDEx:
; SSE2:       # BB#0:
; SSE2-NEXT:    movl $65535, %eax # imm = 0xFFFF
; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
; SSE2-NEXT:    pinsrw $7, %eax, %xmm1
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v16i16_x12345x789ABCDEx:
; SSE3:       # BB#0:
; SSE3-NEXT:    movl $65535, %eax # imm = 0xFFFF
; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
; SSE3-NEXT:    pinsrw $7, %eax, %xmm1
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v16i16_x12345x789ABCDEx:
; SSSE3:       # BB#0:
; SSSE3-NEXT:    movl $65535, %eax # imm = 0xFFFF
; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
; SSSE3-NEXT:    pinsrw $7, %eax, %xmm1
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v16i16_x12345x789ABCDEx:
; SSE41:       # BB#0:
; SSE41-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: insert_v16i16_x12345x789ABCDEx:
; AVX1:       # BB#0:
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    vorps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    vorps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    vorps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: insert_v16i16_x12345x789ABCDEx:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10,11,12,13],ymm1[14],ymm0[15]
; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7],ymm0[8,9,10,11,12,13,14],ymm1[15]
; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: insert_v16i16_x12345x789ABCDEx:
; AVX512F:       # BB#0:
; AVX512F-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512F-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10,11,12,13],ymm1[14],ymm0[15]
; AVX512F-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7],ymm0[8,9,10,11,12,13,14],ymm1[15]
; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: insert_v16i16_x12345x789ABCDEx:
; AVX512VL:       # BB#0:
; AVX512VL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [16,1,2,3,4,5,22,7,8,9,10,11,12,13,14,31]
; AVX512VL-NEXT:    vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT:    retq
  %1 = insertelement <16 x i16> %a, i16 -1, i32 0
  %2 = insertelement <16 x i16> %1, i16 -1, i32 6
  %3 = insertelement <16 x i16> %2, i16 -1, i32 15
  ret <16 x i16> %3
}

define <16 x i8> @insert_v16i8_x123456789ABCDEx(<16 x i8> %a) {
; SSE2-LABEL: insert_v16i8_x123456789ABCDEx:
; SSE2:       # BB#0:
; SSE2-NEXT:    movl $255, %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    por {{.*}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v16i8_x123456789ABCDEx:
; SSE3:       # BB#0:
; SSE3-NEXT:    movl $255, %eax
; SSE3-NEXT:    movd %eax, %xmm1
; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE3-NEXT:    por %xmm1, %xmm0
; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE3-NEXT:    por {{.*}}(%rip), %xmm0
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v16i8_x123456789ABCDEx:
; SSSE3:       # BB#0:
; SSSE3-NEXT:    movdqa {{.*#+}} xmm1 = [255,255,255,255]
; SSSE3-NEXT:    palignr {{.*#+}} xmm1 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
; SSSE3-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13],zero
; SSSE3-NEXT:    por {{.*}}(%rip), %xmm1
; SSSE3-NEXT:    movdqa %xmm1, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v16i8_x123456789ABCDEx:
; SSE41:       # BB#0:
; SSE41-NEXT:    movl $255, %eax
; SSE41-NEXT:    pinsrb $0, %eax, %xmm0
; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v16i8_x123456789ABCDEx:
; AVX:       # BB#0:
; AVX-NEXT:    movl $255, %eax
; AVX-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = insertelement <16 x i8> %a, i8 -1, i32 0
  %2 = insertelement <16 x i8> %1, i8 -1, i32 15
  ret <16 x i8> %2
}

define <32 x i8> @insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx(<32 x i8> %a) {
; SSE2-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
; SSE2:       # BB#0:
; SSE2-NEXT:    movl $255, %eax
; SSE2-NEXT:    movd %eax, %xmm2
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255]
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
; SSE2-NEXT:    por {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pand %xmm2, %xmm1
; SSE2-NEXT:    por %xmm3, %xmm1
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
; SSE3:       # BB#0:
; SSE3-NEXT:    movl $255, %eax
; SSE3-NEXT:    movd %eax, %xmm2
; SSE3-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE3-NEXT:    por %xmm2, %xmm0
; SSE3-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
; SSE3-NEXT:    pand %xmm2, %xmm0
; SSE3-NEXT:    movdqa {{.*#+}} xmm3 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255]
; SSE3-NEXT:    por %xmm3, %xmm0
; SSE3-NEXT:    pand {{.*}}(%rip), %xmm1
; SSE3-NEXT:    por {{.*}}(%rip), %xmm1
; SSE3-NEXT:    pand %xmm2, %xmm1
; SSE3-NEXT:    por %xmm3, %xmm1
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
; SSSE3:       # BB#0:
; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255]
; SSSE3-NEXT:    palignr {{.*#+}} xmm2 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm2[0]
; SSSE3-NEXT:    pshufb {{.*#+}} xmm2 = xmm2[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13],zero
; SSSE3-NEXT:    movdqa {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255]
; SSSE3-NEXT:    por %xmm0, %xmm2
; SSSE3-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13],zero,xmm1[15]
; SSSE3-NEXT:    por {{.*}}(%rip), %xmm1
; SSSE3-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero
; SSSE3-NEXT:    por %xmm0, %xmm1
; SSSE3-NEXT:    movdqa %xmm2, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
; SSE41:       # BB#0:
; SSE41-NEXT:    movl $255, %eax
; SSE41-NEXT:    pinsrb $0, %eax, %xmm0
; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
; SSE41-NEXT:    pinsrb $14, %eax, %xmm1
; SSE41-NEXT:    pinsrb $15, %eax, %xmm1
; SSE41-NEXT:    retq
;
; AVX1-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
; AVX1:       # BB#0:
; AVX1-NEXT:    movl $255, %eax
; AVX1-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
; AVX1-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
; AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
; AVX2:       # BB#0:
; AVX2-NEXT:    movl $255, %eax
; AVX2-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
; AVX2-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
; AVX2-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
; AVX512:       # BB#0:
; AVX512-NEXT:    movl $255, %eax
; AVX512-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
; AVX512-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
; AVX512-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
; AVX512-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512-NEXT:    retq
  %1 = insertelement <32 x i8> %a, i8 -1, i32 0
  %2 = insertelement <32 x i8> %1, i8 -1, i32 15
  %3 = insertelement <32 x i8> %2, i8 -1, i32 30
  %4 = insertelement <32 x i8> %3, i8 -1, i32 31
  ret <32 x i8> %4
}