; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64

; When extracting multiple consecutive elements from a larger
; vector into a smaller one, do it efficiently. We should use
; an EXTRACT_SUBVECTOR node internally rather than a bunch of
; single element extractions.

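; For reference, the same consecutive extraction can also be written as a
; single IR shufflevector with consecutive indices. The function below is a
; minimal illustrative sketch (a hypothetical addition with no autogenerated
; CHECK lines of its own), not one of the original tests:
define void @high_v8f32_to_v4f32_shuffle(<8 x float> %v, ptr %ptr) {
  ; Select elements 4..7 of %v in one instruction instead of four
  ; extractelement/insertelement pairs.
  %high = shufflevector <8 x float> %v, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  store <4 x float> %high, ptr %ptr, align 16
  ret void
}
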
; Extracting the low elements only requires using the right kind of store.
define void @low_v8f32_to_v4f32(<8 x float> %v, ptr %ptr) {
; X86-LABEL: low_v8f32_to_v4f32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vmovaps %xmm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: low_v8f32_to_v4f32:
; X64:       # %bb.0:
; X64-NEXT:    vmovaps %xmm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <8 x float> %v, i32 0
  %ext1 = extractelement <8 x float> %v, i32 1
  %ext2 = extractelement <8 x float> %v, i32 2
  %ext3 = extractelement <8 x float> %v, i32 3
  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
  store <4 x float> %ins3, ptr %ptr, align 16
  ret void
}

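; Note: the low 128 bits of %ymm0 alias %xmm0, so extracting the low half
; needs no extract instruction at all; a plain 128-bit vmovaps of %xmm0
; performs the store directly (as checked above).
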
; Extracting the high elements requires just one AVX instruction.
define void @high_v8f32_to_v4f32(<8 x float> %v, ptr %ptr) {
; X86-LABEL: high_v8f32_to_v4f32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: high_v8f32_to_v4f32:
; X64:       # %bb.0:
; X64-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <8 x float> %v, i32 4
  %ext1 = extractelement <8 x float> %v, i32 5
  %ext2 = extractelement <8 x float> %v, i32 6
  %ext3 = extractelement <8 x float> %v, i32 7
  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
  store <4 x float> %ins3, ptr %ptr, align 16
  ret void
}

; Make sure the element type doesn't alter the codegen. Note that if we were
; actually using the vector in this function and had AVX2, we would generate
; vextracti128 (the integer version).
define void @high_v8i32_to_v4i32(<8 x i32> %v, ptr %ptr) {
; X86-LABEL: high_v8i32_to_v4i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: high_v8i32_to_v4i32:
; X64:       # %bb.0:
; X64-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <8 x i32> %v, i32 4
  %ext1 = extractelement <8 x i32> %v, i32 5
  %ext2 = extractelement <8 x i32> %v, i32 6
  %ext3 = extractelement <8 x i32> %v, i32 7
  %ins0 = insertelement <4 x i32> undef, i32 %ext0, i32 0
  %ins1 = insertelement <4 x i32> %ins0, i32 %ext1, i32 1
  %ins2 = insertelement <4 x i32> %ins1, i32 %ext2, i32 2
  %ins3 = insertelement <4 x i32> %ins2, i32 %ext3, i32 3
  store <4 x i32> %ins3, ptr %ptr, align 16
  ret void
}

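; Note: plain AVX has no vextracti128 (that instruction arrives with AVX2),
; so vextractf128 is used for the integer vector as well; since the result
; goes straight to memory, the FP-domain instruction costs nothing extra here.
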
; Make sure that the element size doesn't alter the codegen.
define void @high_v4f64_to_v2f64(<4 x double> %v, ptr %ptr) {
; X86-LABEL: high_v4f64_to_v2f64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: high_v4f64_to_v2f64:
; X64:       # %bb.0:
; X64-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <4 x double> %v, i32 2
  %ext1 = extractelement <4 x double> %v, i32 3
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
  store <2 x double> %ins1, ptr %ptr, align 16
  ret void
}

; PR25320 - Make sure that a widened (possibly legalized) vector correctly
; zero-extends the upper elements.
; FIXME - Ideally these should just use VMOVD/VMOVQ/VMOVSS/VMOVSD.

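; Background: <2 x i32> and <2 x float> are not legal x86 vector types, so
; those loads are widened during type legalization. The tests check that
; inserting the loaded element 0 into a zero vector still leaves every upper
; lane of the wide result zeroed.
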
define void @legal_vzmovl_2i32_8i32(ptr %in, ptr %out) {
; X86-LABEL: legal_vzmovl_2i32_8i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X86-NEXT:    vmovaps %ymm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2i32_8i32:
; X64:       # %bb.0:
; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x i32>, ptr %in, align 8
  %ext = extractelement <2 x i32> %ld, i64 0
  %ins = insertelement <8 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, i32 %ext, i64 0
  store <8 x i32> %ins, ptr %out, align 32
  ret void
}

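; In the i32/f32 tests, the 64-bit vmovsd load brings in two 32-bit elements,
; so element 1 must still be cleared with a zero blend. In the i64/f64 tests,
; vmovsd alone suffices: the VEX-encoded load already zeroes every lane above
; bit 63.
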
define void @legal_vzmovl_2i64_4i64(ptr %in, ptr %out) {
; X86-LABEL: legal_vzmovl_2i64_4i64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    vmovaps %ymm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2i64_4i64:
; X64:       # %bb.0:
; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x i64>, ptr %in, align 8
  %ext = extractelement <2 x i64> %ld, i64 0
  %ins = insertelement <4 x i64> <i64 undef, i64 0, i64 0, i64 0>, i64 %ext, i64 0
  store <4 x i64> %ins, ptr %out, align 32
  ret void
}

define void @legal_vzmovl_2f32_8f32(ptr %in, ptr %out) {
; X86-LABEL: legal_vzmovl_2f32_8f32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X86-NEXT:    vmovaps %ymm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2f32_8f32:
; X64:       # %bb.0:
; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x float>, ptr %in, align 8
  %ext = extractelement <2 x float> %ld, i64 0
  %ins = insertelement <8 x float> <float undef, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>, float %ext, i64 0
  store <8 x float> %ins, ptr %out, align 32
  ret void
}

define void @legal_vzmovl_2f64_4f64(ptr %in, ptr %out) {
; X86-LABEL: legal_vzmovl_2f64_4f64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    vmovaps %ymm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2f64_4f64:
; X64:       # %bb.0:
; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x double>, ptr %in, align 8
  %ext = extractelement <2 x double> %ld, i64 0
  %ins = insertelement <4 x double> <double undef, double 0.0, double 0.0, double 0.0>, double %ext, i64 0
  store <4 x double> %ins, ptr %out, align 32
  ret void
}