; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s

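; Shuffles that select the upper 128-bit lane (elements 4-7 of the <8 x float>,
; with the remaining elements undef) should lower to a single vextractf128 of lane 1.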
define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: A:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 8, i32 8, i32 8>
  ret <8 x float> %shuffle
}

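; Same as @A for <4 x double>: selecting elements 2-3 extracts the upper lane.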
define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: B:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT:    retq
entry:
  %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 4>
  ret <4 x double> %shuffle
}

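; Extracting lane 1 with the vextractf128 intrinsic and storing the result
; should fold into the memory form of vextractf128.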
define void @t0(ptr nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; CHECK-LABEL: t0:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 1)
  store <4 x float> %0, ptr %addr, align 16
  ret void
}

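; Same fold for the <4 x double> intrinsic.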
define void @t2(ptr nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; CHECK-LABEL: t2:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 1)
  store <2 x double> %0, ptr %addr, align 16
  ret void
}

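; Same fold for the integer intrinsic; the bitcasts are no-ops for lowering.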
define void @t4(ptr nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; CHECK-LABEL: t4:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %a to <8 x i32>
  %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 1)
  %2 = bitcast <4 x i32> %1 to <2 x i64>
  store <2 x i64> %2, ptr %addr, align 16
  ret void
}

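; Extracting lane 0 is free: the result is already in %xmm0, so only an
; aligned vmovaps store remains.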
define void @t5(ptr nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; CHECK-LABEL: t5:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 0)
  store <4 x float> %0, ptr %addr, align 16
  ret void
}

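; Lane-0 extract of <4 x double> likewise becomes a plain aligned store.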
define void @t6(ptr nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; CHECK-LABEL: t6:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 0)
  store <2 x double> %0, ptr %addr, align 16
  ret void
}

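; Lane-0 extract of the integer vector likewise becomes a plain aligned store.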
define void @t7(ptr nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; CHECK-LABEL: t7:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %a to <8 x i32>
  %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
  %2 = bitcast <4 x i32> %1 to <2 x i64>
  store <2 x i64> %2, ptr %addr, align 16
  ret void
}

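; Same as @t7 but with a 1-byte-aligned destination, so the unaligned vmovups
; form is used instead.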
define void @t8(ptr nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; CHECK-LABEL: t8:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vmovups %xmm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x i64> %a to <8 x i32>
  %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
  %2 = bitcast <4 x i32> %1 to <2 x i64>
  store <2 x i64> %2, ptr %addr, align 1
  ret void
}

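; Four consecutive i64 zero stores should be merged into a single unaligned
; 256-bit store of a zeroed ymm register.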
define void @t9(ptr %p) {
; CHECK-LABEL: t9:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  store i64 0, ptr %p
  %q = getelementptr i64, ptr %p, i64 1
  store i64 0, ptr %q
  %r = getelementptr i64, ptr %p, i64 2
  store i64 0, ptr %r
  %s = getelementptr i64, ptr %p, i64 3
  store i64 0, ptr %s
  ret void
}

declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone
declare <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32>, i8) nounwind readnone