; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-FAST
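; Insert +0.0 into element 0 of a v2f64: a zeroed register is blended in via
; movsd on plain SSE and via a 32-bit blendps on SSE4.1/AVX.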
define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
; SSE2-LABEL: insert_v2f64_z1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorpd %xmm1, %xmm1
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v2f64_z1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorpd %xmm1, %xmm1
; SSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v2f64_z1:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorpd %xmm1, %xmm1
; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v2f64_z1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorps %xmm1, %xmm1
; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v2f64_z1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT:    retq
  %1 = insertelement <2 x double> %a, double 0.0, i32 0
  ret <2 x double> %1
}
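; Zero elements 1 and 2 of a v4f64: the low half is zero-extended with movq and
; the high half gets a zero blended into its low element; AVX uses one ymm blend.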
define <4 x double> @insert_v4f64_0zz3(<4 x double> %a) {
; SSE2-LABEL: insert_v4f64_0zz3:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE2-NEXT:    xorpd %xmm2, %xmm2
; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v4f64_0zz3:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE3-NEXT:    xorpd %xmm2, %xmm2
; SSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v4f64_0zz3:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
; SSSE3-NEXT:    xorpd %xmm2, %xmm2
; SSSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v4f64_0zz3:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE41-NEXT:    xorps %xmm2, %xmm2
; SSE41-NEXT:    blendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v4f64_0zz3:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
; AVX-NEXT:    retq
  %1 = insertelement <4 x double> %a, double 0.0, i32 1
  %2 = insertelement <4 x double> %1, double 0.0, i32 2
  ret <4 x double> %2
}
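; The i64 version of insert_v2f64_z1: inserting i64 0 into element 0 lowers to
; the same FP zero-blend sequence.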
define <2 x i64> @insert_v2i64_z1(<2 x i64> %a) {
; SSE2-LABEL: insert_v2i64_z1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorpd %xmm1, %xmm1
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v2i64_z1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorpd %xmm1, %xmm1
; SSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v2i64_z1:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorpd %xmm1, %xmm1
; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v2i64_z1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorps %xmm1, %xmm1
; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v2i64_z1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT:    retq
  %1 = insertelement <2 x i64> %a, i64 0, i32 0
  ret <2 x i64> %1
}
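; Zero element 2 of a v4i64 (the low element of the high half).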
define <4 x i64> @insert_v4i64_01z3(<4 x i64> %a) {
; SSE2-LABEL: insert_v4i64_01z3:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorpd %xmm2, %xmm2
; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v4i64_01z3:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorpd %xmm2, %xmm2
; SSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v4i64_01z3:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorpd %xmm2, %xmm2
; SSSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v4i64_01z3:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorps %xmm2, %xmm2
; SSE41-NEXT:    blendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v4i64_01z3:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX-NEXT:    retq
  %1 = insertelement <4 x i64> %a, i64 0, i32 2
  ret <4 x i64> %1
}
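; Zero element 2 of a v4f32: plain SSE needs two shufps, SSE4.1/AVX a single blendps.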
define <4 x float> @insert_v4f32_01z3(<4 x float> %a) {
; SSE2-LABEL: insert_v4f32_01z3:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm1, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v4f32_01z3:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorps %xmm1, %xmm1
; SSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v4f32_01z3:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorps %xmm1, %xmm1
; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v4f32_01z3:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorps %xmm1, %xmm1
; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v4f32_01z3:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX-NEXT:    retq
  %1 = insertelement <4 x float> %a, float 0.0, i32 2
  ret <4 x float> %1
}
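; Zero elements 0 and 6 of a v8f32; AVX folds both inserts into one ymm blend.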
define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
; SSE2-LABEL: insert_v8f32_z12345z7:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v8f32_z12345z7:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorps %xmm2, %xmm2
; SSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v8f32_z12345z7:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorps %xmm2, %xmm2
; SSSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v8f32_z12345z7:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorps %xmm2, %xmm2
; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE41-NEXT:    blendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v8f32_z12345z7:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX-NEXT:    retq
  %1 = insertelement <8 x float> %a, float 0.0, i32 0
  %2 = insertelement <8 x float> %1, float 0.0, i32 6
  ret <8 x float> %2
}
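; Integer version of insert_v4f32_01z3: zero element 2 of a v4i32.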
define <4 x i32> @insert_v4i32_01z3(<4 x i32> %a) {
; SSE2-LABEL: insert_v4i32_01z3:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm1, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v4i32_01z3:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorps %xmm1, %xmm1
; SSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v4i32_01z3:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorps %xmm1, %xmm1
; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v4i32_01z3:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorps %xmm1, %xmm1
; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v4i32_01z3:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX-NEXT:    retq
  %1 = insertelement <4 x i32> %a, i32 0, i32 2
  ret <4 x i32> %1
}
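; Integer version of insert_v8f32_z12345z7: zero elements 0 and 6 of a v8i32.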
define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
; SSE2-LABEL: insert_v8i32_z12345z7:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v8i32_z12345z7:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorps %xmm2, %xmm2
; SSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE3-NEXT:    xorps %xmm2, %xmm2
; SSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v8i32_z12345z7:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorps %xmm2, %xmm2
; SSSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSSE3-NEXT:    xorps %xmm2, %xmm2
; SSSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v8i32_z12345z7:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorps %xmm2, %xmm2
; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE41-NEXT:    blendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v8i32_z12345z7:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX-NEXT:    retq
  %1 = insertelement <8 x i32> %a, i32 0, i32 0
  %2 = insertelement <8 x i32> %1, i32 0, i32 6
  ret <8 x i32> %2
}
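; Zero elements 0 and 6 of a v8i16: pinsrw from a zeroed GPR before SSE4.1,
; a single pblendw against a zero vector afterwards.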
define <8 x i16> @insert_v8i16_z12345z7(<8 x i16> %a) {
; SSE2-LABEL: insert_v8i16_z12345z7:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorl %eax, %eax
; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v8i16_z12345z7:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorl %eax, %eax
; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v8i16_z12345z7:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorl %eax, %eax
; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v8i16_z12345z7:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pxor %xmm1, %xmm1
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v8i16_z12345z7:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; AVX-NEXT:    retq
  %1 = insertelement <8 x i16> %a, i16 0, i32 0
  %2 = insertelement <8 x i16> %1, i16 0, i32 6
  ret <8 x i16> %2
}
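; Zero elements 0, 6 and 15 of a v16i16; AVX collapses all three inserts into
; one load-folded vandps mask.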
define <16 x i16> @insert_v16i16_z12345z789ABCDEz(<16 x i16> %a) {
; SSE2-LABEL: insert_v16i16_z12345z789ABCDEz:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorl %eax, %eax
; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
; SSE2-NEXT:    pinsrw $7, %eax, %xmm1
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v16i16_z12345z789ABCDEz:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorl %eax, %eax
; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
; SSE3-NEXT:    pinsrw $7, %eax, %xmm1
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v16i16_z12345z789ABCDEz:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorl %eax, %eax
; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
; SSSE3-NEXT:    pinsrw $7, %eax, %xmm1
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v16i16_z12345z789ABCDEz:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pxor %xmm2, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; SSE41-NEXT:    retq
;
; AVX-LABEL: insert_v16i16_z12345z789ABCDEz:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = insertelement <16 x i16> %a, i16 0, i32 0
  %2 = insertelement <16 x i16> %1, i16 0, i32 6
  %3 = insertelement <16 x i16> %2, i16 0, i32 15
  ret <16 x i16> %3
}
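; Zero elements 0 and 15 of a v16i8: a constant-pool mask before SSE4.1, pinsrb
; from a zeroed GPR with SSE4.1/AVX, and the mask again with AVX2 plus
; fast-variable-shuffle.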
define <16 x i8> @insert_v16i8_z123456789ABCDEz(<16 x i8> %a) {
; SSE2-LABEL: insert_v16i8_z123456789ABCDEz:
; SSE2:       # %bb.0:
; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v16i8_z123456789ABCDEz:
; SSE3:       # %bb.0:
; SSE3-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v16i8_z123456789ABCDEz:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v16i8_z123456789ABCDEz:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorl %eax, %eax
; SSE41-NEXT:    pinsrb $0, %eax, %xmm0
; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: insert_v16i8_z123456789ABCDEz:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax
; AVX1-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm0
; AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-SLOW-LABEL: insert_v16i8_z123456789ABCDEz:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    xorl %eax, %eax
; AVX2-SLOW-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: insert_v16i8_z123456789ABCDEz:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT:    retq
  %1 = insertelement <16 x i8> %a, i8 0, i32 0
  %2 = insertelement <16 x i8> %1, i8 0, i32 15
  ret <16 x i8> %2
}
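; Zero elements 0, 15, 30 and 31 of a v32i8; the AVX targets split the ymm
; value, handle each xmm half, and reassemble with vinsert{f,i}128.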
define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
; SSE2-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; SSE2:       # %bb.0:
; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE2-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE2-NEXT:    retq
;
; SSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; SSE3:       # %bb.0:
; SSE3-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE3-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm1
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorl %eax, %eax
; SSE41-NEXT:    pinsrb $0, %eax, %xmm0
; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
; SSE41-NEXT:    pxor %xmm2, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax
; AVX1-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
; AVX1-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-SLOW-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    xorl %eax, %eax
; AVX2-SLOW-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
; AVX2-SLOW-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
; AVX2-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm1
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
; AVX2-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-FAST-NEXT:    retq
  %1 = insertelement <32 x i8> %a, i8 0, i32 0
  %2 = insertelement <32 x i8> %1, i8 0, i32 15
  %3 = insertelement <32 x i8> %2, i8 0, i32 30
  %4 = insertelement <32 x i8> %3, i8 0, i32 31
  ret <32 x i8> %4
}
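; PR41512: <x, 0, y, 0> built from two insertelements into bases with a
; known-zero element and a shufflevector; movd already zero-extends the GPRs,
; so a single punpcklqdq finishes the job with no extra zeroing.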
define <4 x i32> @PR41512(i32 %x, i32 %y) {
; SSE-LABEL: PR41512:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %edi, %xmm0
; SSE-NEXT:    movd %esi, %xmm1
; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    retq
;
; AVX-LABEL: PR41512:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd %edi, %xmm0
; AVX-NEXT:    vmovd %esi, %xmm1
; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT:    retq
  %ins1 = insertelement <4 x i32> <i32 undef, i32 0, i32 undef, i32 undef>, i32 %x, i32 0
  %ins2 = insertelement <4 x i32> <i32 undef, i32 0, i32 undef, i32 undef>, i32 %y, i32 0
  %r = shufflevector <4 x i32> %ins1, <4 x i32> %ins2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x i32> %r
}
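; Same pattern as PR41512 with i64 elements: movq already zero-extends, and the
; AVX targets just concatenate the two halves with vinsert{f,i}128.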
define <4 x i64> @PR41512_v4i64(i64 %x, i64 %y) {
; SSE-LABEL: PR41512_v4i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %xmm0
; SSE-NEXT:    movq %rsi, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: PR41512_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovq %rdi, %xmm0
; AVX1-NEXT:    vmovq %rsi, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: PR41512_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovq %rdi, %xmm0
; AVX2-NEXT:    vmovq %rsi, %xmm1
; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %ins1 = insertelement <4 x i64> <i64 undef, i64 0, i64 undef, i64 undef>, i64 %x, i32 0
  %ins2 = insertelement <4 x i64> <i64 undef, i64 0, i64 undef, i64 undef>, i64 %y, i32 0
  %r = shufflevector <4 x i64> %ins1, <4 x i64> %ins2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x i64> %r
}
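; Same pattern with float scalars and zeroinitializer bases: each scalar is
; blended into a zero vector (movss before SSE4.1, blendps after) and AVX
; concatenates the halves with vinsertf128.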
define <8 x float> @PR41512_v8f32(float %x, float %y) {
; SSE2-LABEL: PR41512_v8f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    xorps %xmm3, %xmm3
; SSE2-NEXT:    movss {{.*#+}} xmm3 = xmm0[0],xmm3[1,2,3]
; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT:    movaps %xmm3, %xmm0
; SSE2-NEXT:    movaps %xmm2, %xmm1
; SSE2-NEXT:    retq
;
; SSE3-LABEL: PR41512_v8f32:
; SSE3:       # %bb.0:
; SSE3-NEXT:    xorps %xmm2, %xmm2
; SSE3-NEXT:    xorps %xmm3, %xmm3
; SSE3-NEXT:    movss {{.*#+}} xmm3 = xmm0[0],xmm3[1,2,3]
; SSE3-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE3-NEXT:    movaps %xmm3, %xmm0
; SSE3-NEXT:    movaps %xmm2, %xmm1
; SSE3-NEXT:    retq
;
; SSSE3-LABEL: PR41512_v8f32:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    xorps %xmm2, %xmm2
; SSSE3-NEXT:    xorps %xmm3, %xmm3
; SSSE3-NEXT:    movss {{.*#+}} xmm3 = xmm0[0],xmm3[1,2,3]
; SSSE3-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSSE3-NEXT:    movaps %xmm3, %xmm0
; SSSE3-NEXT:    movaps %xmm2, %xmm1
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: PR41512_v8f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    xorps %xmm2, %xmm2
; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; SSE41-NEXT:    blendps {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: PR41512_v8f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; AVX-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %ins1 = insertelement <8 x float> zeroinitializer, float %x, i32 0
  %ins2 = insertelement <8 x float> zeroinitializer, float %y, i32 0
  %r = shufflevector <8 x float> %ins1, <8 x float> %ins2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
  ret <8 x float> %r
}
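; As PR41512, but the scalars come from memory: two scalar movss loads (which
; zero the upper elements) followed by movlhps.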
define <4 x i32> @PR41512_loads(i32* %p1, i32* %p2) {
; SSE-LABEL: PR41512_loads:
; SSE:       # %bb.0:
; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    retq
;
; AVX-LABEL: PR41512_loads:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT:    retq
  %x = load i32, i32* %p1
  %y = load i32, i32* %p2
  %ins1 = insertelement <4 x i32> <i32 undef, i32 0, i32 undef, i32 undef>, i32 %x, i32 0
  %ins2 = insertelement <4 x i32> <i32 undef, i32 0, i32 undef, i32 undef>, i32 %y, i32 0
  %r = shufflevector <4 x i32> %ins1, <4 x i32> %ins2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x i32> %r
}