1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32 --check-prefix=SSE-X32 --check-prefix=SSE2-X32
3 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE2-X64
4 ; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32 --check-prefix=SSE-X32 --check-prefix=SSE41-X32
5 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE41-X64
6 ; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32 --check-prefix=AVX-X32
7 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=AVX-X64
8 ; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+sse -enable-legalize-types-checking | FileCheck %s --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE2-X64
9 ; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+sse -enable-legalize-types-checking | FileCheck %s --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE2-X64
; Store element 0 of a v16i8: SSE2 goes through a GPR; SSE4.1/AVX use pextrb to memory.
define void @extract_i8_0(i8* nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X32-LABEL: extract_i8_0:
; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: movd %xmm0, %ecx
; SSE2-X32-NEXT: movb %cl, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i8_0:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movd %xmm0, %eax
; SSE2-X64-NEXT: movb %al, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i8_0:
; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrb $0, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i8_0:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrb $0, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i8_0:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrb $0, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i8_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrb $0, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <16 x i8> %foo, i32 0
  store i8 %vecext, i8* %dst, align 1
  ret void
}
; Store element 3 of a v16i8: SSE2 extracts dword 0 and shifts; SSE4.1/AVX use pextrb $3.
define void @extract_i8_3(i8* nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X32-LABEL: extract_i8_3:
; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: movd %xmm0, %ecx
; SSE2-X32-NEXT: shrl $24, %ecx
; SSE2-X32-NEXT: movb %cl, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i8_3:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movd %xmm0, %eax
; SSE2-X64-NEXT: shrl $24, %eax
; SSE2-X64-NEXT: movb %al, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i8_3:
; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrb $3, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i8_3:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrb $3, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i8_3:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrb $3, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i8_3:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrb $3, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <16 x i8> %foo, i32 3
  store i8 %vecext, i8* %dst, align 1
  ret void
}
; Store element 15 of a v16i8: SSE2 extracts word 7 and stores the high byte; SSE4.1/AVX use pextrb $15.
define void @extract_i8_15(i8* nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X32-LABEL: extract_i8_15:
; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: pextrw $7, %xmm0, %ecx
; SSE2-X32-NEXT: movb %ch, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i8_15:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
; SSE2-X64-NEXT: movb %ah, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i8_15:
; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrb $15, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i8_15:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrb $15, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i8_15:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrb $15, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i8_15:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrb $15, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <16 x i8> %foo, i32 15
  store i8 %vecext, i8* %dst, align 1
  ret void
}
; Store element 0 of a v8i16: SSE2 goes through a GPR; SSE4.1/AVX use pextrw to memory.
define void @extract_i16_0(i16* nocapture %dst, <8 x i16> %foo) nounwind {
; SSE2-X32-LABEL: extract_i16_0:
; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: movd %xmm0, %ecx
; SSE2-X32-NEXT: movw %cx, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i16_0:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movd %xmm0, %eax
; SSE2-X64-NEXT: movw %ax, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i16_0:
; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrw $0, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i16_0:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrw $0, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i16_0:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrw $0, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i16_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrw $0, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <8 x i16> %foo, i32 0
  store i16 %vecext, i16* %dst, align 1
  ret void
}
; Store element 7 of a v8i16: SSE2 extracts to a GPR first; SSE4.1/AVX store directly with pextrw $7.
define void @extract_i16_7(i16* nocapture %dst, <8 x i16> %foo) nounwind {
; SSE2-X32-LABEL: extract_i16_7:
; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: pextrw $7, %xmm0, %ecx
; SSE2-X32-NEXT: movw %cx, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i16_7:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
; SSE2-X64-NEXT: movw %ax, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i16_7:
; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrw $7, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i16_7:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrw $7, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i16_7:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrw $7, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i16_7:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrw $7, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <8 x i16> %foo, i32 7
  store i16 %vecext, i16* %dst, align 1
  ret void
}
; Store element 0 of a v4i32: lowers to a scalar movss store on all SSE levels.
define void @extract_i32_0(i32* nocapture %dst, <4 x i32> %foo) nounwind {
; SSE-X32-LABEL: extract_i32_0:
; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movss %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_i32_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movss %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i32_0:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovss %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i32_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovss %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <4 x i32> %foo, i32 0
  store i32 %vecext, i32* %dst, align 1
  ret void
}
; Store element 3 of a v4i32: SSE2 shuffles lane 3 down then movd; SSE4.1/AVX use extractps to memory.
define void @extract_i32_3(i32* nocapture %dst, <4 x i32> %foo) nounwind {
; SSE2-X32-LABEL: extract_i32_3:
; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-X32-NEXT: movd %xmm0, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i32_3:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-X64-NEXT: movd %xmm0, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i32_3:
; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: extractps $3, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i32_3:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: extractps $3, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i32_3:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vextractps $3, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i32_3:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vextractps $3, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <4 x i32> %foo, i32 3
  store i32 %vecext, i32* %dst, align 1
  ret void
}
; Store element 0 of a v2i64: lowers to a low-quadword movlps store on all SSE levels.
define void @extract_i64_0(i64* nocapture %dst, <2 x i64> %foo) nounwind {
; SSE-X32-LABEL: extract_i64_0:
; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movlps %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_i64_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movlps %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i64_0:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovlps %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i64_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovlps %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <2 x i64> %foo, i32 0
  store i64 %vecext, i64* %dst, align 1
  ret void
}
; Store element 1 of a v2i64: shuffled movq on SSE2, pextrq $1 on SSE4.1 x64, shuffle+movlps on AVX x32.
define void @extract_i64_1(i64* nocapture %dst, <2 x i64> %foo) nounwind {
; SSE-X32-LABEL: extract_i64_1:
; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-X32-NEXT: movq %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i64_1:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-X64-NEXT: movq %xmm0, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X64-LABEL: extract_i64_1:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrq $1, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i64_1:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX-X32-NEXT: vmovlps %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i64_1:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrq $1, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <2 x i64> %foo, i32 1
  store i64 %vecext, i64* %dst, align 1
  ret void
}
; Store element 0 of a v4f32: a plain scalar movss store on all SSE levels.
define void @extract_f32_0(float* nocapture %dst, <4 x float> %foo) nounwind {
; SSE-X32-LABEL: extract_f32_0:
; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movss %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_f32_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movss %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f32_0:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovss %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f32_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovss %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <4 x float> %foo, i32 0
  store float %vecext, float* %dst, align 1
  ret void
}
; Store element 3 of a v4f32: SSE2 shuffles lane 3 down then movss; SSE4.1/AVX use extractps to memory.
define void @extract_f32_3(float* nocapture %dst, <4 x float> %foo) nounwind {
; SSE2-X32-LABEL: extract_f32_3:
; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-X32-NEXT: movss %xmm0, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_f32_3:
; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-X64-NEXT: movss %xmm0, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_f32_3:
; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: extractps $3, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_f32_3:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: extractps $3, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f32_3:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vextractps $3, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f32_3:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vextractps $3, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <4 x float> %foo, i32 3
  store float %vecext, float* %dst, align 1
  ret void
}
; Store element 0 of a v2f64: a low-quadword movlps store on all SSE levels.
define void @extract_f64_0(double* nocapture %dst, <2 x double> %foo) nounwind {
; SSE-X32-LABEL: extract_f64_0:
; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movlps %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_f64_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movlps %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f64_0:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovlps %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f64_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovlps %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <2 x double> %foo, i32 0
  store double %vecext, double* %dst, align 1
  ret void
}
; Store element 1 of a v2f64: a high-quadword movhps store on all SSE levels.
define void @extract_f64_1(double* nocapture %dst, <2 x double> %foo) nounwind {
; SSE-X32-LABEL: extract_f64_1:
; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movhps %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_f64_1:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movhps %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f64_1:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovhps %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f64_1:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovhps %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <2 x double> %foo, i32 1
  store double %vecext, double* %dst, align 1
  ret void
}
; Store element 0 of a v2fp128: x32 copies the 16 bytes via four GPR moves (fp128 is passed on the
; stack there); x64 stores xmm0 with an unaligned 16-byte move.
define void @extract_f128_0(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
; SSE-X32-LABEL: extract_f128_0:
; SSE-X32: # %bb.0:
; SSE-X32-NEXT: pushl %edi
; SSE-X32-NEXT: pushl %esi
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; SSE-X32-NEXT: movl %esi, 12(%edi)
; SSE-X32-NEXT: movl %edx, 8(%edi)
; SSE-X32-NEXT: movl %ecx, 4(%edi)
; SSE-X32-NEXT: movl %eax, (%edi)
; SSE-X32-NEXT: popl %esi
; SSE-X32-NEXT: popl %edi
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_f128_0:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movups %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f128_0:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovups %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f128_0:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovups %xmm0, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <2 x fp128> %foo, i32 0
  store fp128 %vecext, fp128* %dst, align 1
  ret void
}
; Store element 1 of a v2fp128: same shape as extract_f128_0 but the second fp128 arrives in
; xmm1 on x64 (and at higher stack offsets on x32).
define void @extract_f128_1(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
; SSE-X32-LABEL: extract_f128_1:
; SSE-X32: # %bb.0:
; SSE-X32-NEXT: pushl %edi
; SSE-X32-NEXT: pushl %esi
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; SSE-X32-NEXT: movl %esi, 12(%edi)
; SSE-X32-NEXT: movl %edx, 8(%edi)
; SSE-X32-NEXT: movl %ecx, 4(%edi)
; SSE-X32-NEXT: movl %eax, (%edi)
; SSE-X32-NEXT: popl %esi
; SSE-X32-NEXT: popl %edi
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_f128_1:
; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movups %xmm1, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f128_1:
; AVX-X32: # %bb.0:
; AVX-X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovups %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f128_1:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovups %xmm1, (%rdi)
; AVX-X64-NEXT: retq
  %vecext = extractelement <2 x fp128> %foo, i32 1
  store fp128 %vecext, fp128* %dst, align 1
  ret void
}
; Out-of-range extract index (16 on a v16i8) is undef, so the whole store folds away.
define void @extract_i8_undef(i8* nocapture %dst, <16 x i8> %foo) nounwind {
; X32-LABEL: extract_i8_undef:
; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_i8_undef:
; X64: # %bb.0:
; X64-NEXT: retq
  %vecext = extractelement <16 x i8> %foo, i32 16 ; undef
  store i8 %vecext, i8* %dst, align 1
  ret void
}
; Out-of-range extract index (9 on a v8i16) is undef, so the whole store folds away.
define void @extract_i16_undef(i16* nocapture %dst, <8 x i16> %foo) nounwind {
; X32-LABEL: extract_i16_undef:
; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_i16_undef:
; X64: # %bb.0:
; X64-NEXT: retq
  %vecext = extractelement <8 x i16> %foo, i32 9 ; undef
  store i16 %vecext, i16* %dst, align 1
  ret void
}
; Out-of-range extract index (6 on a v4i32) is undef, so the whole store folds away.
define void @extract_i32_undef(i32* nocapture %dst, <4 x i32> %foo) nounwind {
; X32-LABEL: extract_i32_undef:
; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_i32_undef:
; X64: # %bb.0:
; X64-NEXT: retq
  %vecext = extractelement <4 x i32> %foo, i32 6 ; undef
  store i32 %vecext, i32* %dst, align 1
  ret void
}
; Out-of-range extract index (2 on a v2i64) is undef, so the whole store folds away.
define void @extract_i64_undef(i64* nocapture %dst, <2 x i64> %foo) nounwind {
; X32-LABEL: extract_i64_undef:
; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_i64_undef:
; X64: # %bb.0:
; X64-NEXT: retq
  %vecext = extractelement <2 x i64> %foo, i32 2 ; undef
  store i64 %vecext, i64* %dst, align 1
  ret void
}
; Out-of-range extract index (6 on a v4f32) is undef, so the whole store folds away.
define void @extract_f32_undef(float* nocapture %dst, <4 x float> %foo) nounwind {
; X32-LABEL: extract_f32_undef:
; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_f32_undef:
; X64: # %bb.0:
; X64-NEXT: retq
  %vecext = extractelement <4 x float> %foo, i32 6 ; undef
  store float %vecext, float* %dst, align 1
  ret void
}
; Out-of-range extract index (2 on a v2f64) is undef, so the whole store folds away.
define void @extract_f64_undef(double* nocapture %dst, <2 x double> %foo) nounwind {
; X32-LABEL: extract_f64_undef:
; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_f64_undef:
; X64: # %bb.0:
; X64-NEXT: retq
  %vecext = extractelement <2 x double> %foo, i32 2 ; undef
  store double %vecext, double* %dst, align 1
  ret void
}
; Out-of-range extract index (2 on a v2fp128) is undef, so the whole store folds away.
define void @extract_f128_undef(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
; X32-LABEL: extract_f128_undef:
; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_f128_undef:
; X64: # %bb.0:
; X64-NEXT: retq
  %vecext = extractelement <2 x fp128> %foo, i32 2 ; undef
  store fp128 %vecext, fp128* %dst, align 1
  ret void
}