; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s
; RUN: llc -O0 < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s -check-prefix=CHECK_O0

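; The 32-byte aligned loads and stores below should stay as single ymm
; operations (vmovaps/vmovapd/vmovdqa), and all three vectors must survive the
; call to @dummy via 32-byte spills and reloads.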
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind {
; CHECK-LABEL: test_256_load:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: pushq %r15
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $96, %rsp
; CHECK-NEXT: movq %rdx, %r14
; CHECK-NEXT: movq %rsi, %r15
; CHECK-NEXT: movq %rdi, %rbx
; CHECK-NEXT: vmovaps (%rbx), %ymm0
; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps (%r15), %ymm1
; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps (%r14), %ymm2
; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
; CHECK-NEXT: callq dummy
; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT: vmovaps %ymm0, (%rbx)
; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT: vmovaps %ymm0, (%r15)
; CHECK-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT: vmovaps %ymm0, (%r14)
; CHECK-NEXT: addq $96, %rsp
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
; CHECK-NEXT: popq %r15
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: test_256_load:
; CHECK_O0: # BB#0: # %entry
; CHECK_O0-NEXT: subq $152, %rsp
; CHECK_O0-NEXT: vmovapd (%rdi), %ymm0
; CHECK_O0-NEXT: vmovaps (%rsi), %ymm1
; CHECK_O0-NEXT: vmovdqa (%rdx), %ymm2
; CHECK_O0-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK_O0-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK_O0-NEXT: vmovups %ymm2, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK_O0-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK_O0-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK_O0-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK_O0-NEXT: callq dummy
; CHECK_O0-NEXT: movq {{[0-9]+}}(%rsp), %rdx # 8-byte Reload
; CHECK_O0-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; CHECK_O0-NEXT: vmovapd %ymm0, (%rdx)
; CHECK_O0-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; CHECK_O0-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm1 # 32-byte Reload
; CHECK_O0-NEXT: vmovaps %ymm1, (%rsi)
; CHECK_O0-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; CHECK_O0-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm2 # 32-byte Reload
; CHECK_O0-NEXT: vmovdqa %ymm2, (%rdi)
; CHECK_O0-NEXT: addq $152, %rsp
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
entry:
  %0 = bitcast double* %d to <4 x double>*
  %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
  %1 = bitcast float* %f to <8 x float>*
  %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32
  %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32
  tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
  store <4 x double> %tmp1.i, <4 x double>* %0, align 32
  store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
  store <4 x i64> %tmp1.i16, <4 x i64>* %i, align 32
  ret void
}

declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)

;; The two tests below check that a load + scalar_to_vector + ins_subvec + zext
;; sequence is folded into a single vmovss, vmovsd, or vinsertps from memory.

define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
; CHECK-LABEL: mov00:
; CHECK: # BB#0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: mov00:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK_O0-NEXT: # implicit-def: %ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK_O0-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3,4,5,6,7]
; CHECK_O0-NEXT: retq
  %val = load float, float* %ptr
  %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
  ret <8 x float> %i0
}

define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
; CHECK-LABEL: mov01:
; CHECK: # BB#0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: mov01:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK_O0-NEXT: # implicit-def: %ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK_O0-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3]
; CHECK_O0-NEXT: retq
  %val = load double, double* %ptr
  %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
  ret <4 x double> %i0
}

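; The four tests below store a 256-bit vector through an undef pointer. A
; 32-byte-aligned store stays a single ymm store; with only 4-byte alignment
; the -O2 codegen splits it into two 16-byte stores (vextractf128 + vmovups),
; while -O0 keeps a single unaligned ymm store.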
define void @storev16i16(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16:
; CHECK: # BB#0:
; CHECK-NEXT: vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
  store <16 x i16> %a, <16 x i16>* undef, align 32
  unreachable
}

define void @storev16i16_01(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16_01:
; CHECK: # BB#0:
; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT: vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16_01:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
  store <16 x i16> %a, <16 x i16>* undef, align 4
  unreachable
}

define void @storev32i8(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8:
; CHECK: # BB#0:
; CHECK-NEXT: vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
  store <32 x i8> %a, <32 x i8>* undef, align 32
  unreachable
}

define void @storev32i8_01(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8_01:
; CHECK: # BB#0:
; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT: vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8_01:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
  store <32 x i8> %a, <32 x i8>* undef, align 4
  unreachable
}

; It is faster to make two 16-byte stores if the data is already in xmm
; registers, for example after an integer operation.
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
; CHECK-LABEL: double_save:
; CHECK: # BB#0:
; CHECK-NEXT: vmovaps %xmm1, 16(%rdi)
; CHECK-NEXT: vmovaps %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: double_save:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: # implicit-def: %ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i32> %Z, <8 x i32>* %P, align 16
  ret void
}

declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind

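; The call to the AVX maskstore intrinsic below should be lowered to a
; vmaskmovps, with the constant mask <-1,0,0,0,0,0,0,0> materialized in a
; register.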
define void @f_f() nounwind {
; CHECK-LABEL: f_f:
; CHECK: # BB#0: # %allocas
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB8_2
; CHECK-NEXT: # BB#1: # %cif_mask_all
; CHECK-NEXT: .LBB8_2: # %cif_mask_mixed
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB8_4
; CHECK-NEXT: # BB#3: # %cif_mixed_test_all
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: vmovd %eax, %xmm0
; CHECK-NEXT: vmaskmovps %ymm0, %ymm0, (%rax)
; CHECK-NEXT: .LBB8_4: # %cif_mixed_test_any_check
;
; CHECK_O0-LABEL: f_f:
; CHECK_O0: # BB#0: # %allocas
; CHECK_O0-NEXT: # implicit-def: %al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB8_1
; CHECK_O0-NEXT: jmp .LBB8_2
; CHECK_O0-NEXT: .LBB8_1: # %cif_mask_all
; CHECK_O0-NEXT: .LBB8_2: # %cif_mask_mixed
; CHECK_O0-NEXT: # implicit-def: %al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB8_3
; CHECK_O0-NEXT: jmp .LBB8_4
; CHECK_O0-NEXT: .LBB8_3: # %cif_mixed_test_all
; CHECK_O0-NEXT: movl $-1, %eax
; CHECK_O0-NEXT: vmovd %eax, %xmm0
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: # implicit-def: %rcx
; CHECK_O0-NEXT: # implicit-def: %ymm2
; CHECK_O0-NEXT: vmaskmovps %ymm2, %ymm1, (%rcx)
; CHECK_O0-NEXT: .LBB8_4: # %cif_mixed_test_any_check
allocas:
  br i1 undef, label %cif_mask_all, label %cif_mask_mixed

cif_mask_all:
  unreachable

cif_mask_mixed:
  br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check

cif_mixed_test_all:
  call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x i32> <i32 -1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <8 x float> undef) nounwind
  unreachable

cif_mixed_test_any_check:
  unreachable
}

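; With slow-unaligned-mem-32, a 32-byte load or store whose known alignment is
; less than 32 bytes (align 1 and align 16 below) is split into two 16-byte
; accesses at -O2, while a sufficiently aligned access (align 64) stays a
; single ymm operation. The add of zero is folded away, so only the memory
; traffic remains.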
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
; CHECK-LABEL: add8i32:
; CHECK: # BB#0:
; CHECK-NEXT: vmovups (%rsi), %xmm0
; CHECK-NEXT: vmovups 16(%rsi), %xmm1
; CHECK-NEXT: vmovups %xmm1, 16(%rdi)
; CHECK-NEXT: vmovups %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: add8i32:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: vmovdqu (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1
; CHECK_O0-NEXT: # implicit-def: %ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
  %b = load <8 x i32>, <8 x i32>* %bp, align 1
  %x = add <8 x i32> zeroinitializer, %b
  store <8 x i32> %x, <8 x i32>* %ret, align 1
  ret void
}

define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK-LABEL: add4i64a64:
; CHECK: # BB#0:
; CHECK-NEXT: vmovaps (%rsi), %ymm0
; CHECK-NEXT: vmovaps %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: add4i64a64:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: vmovaps (%rsi), %ymm0
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rdi)
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
  %b = load <4 x i64>, <4 x i64>* %bp, align 64
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 64
  ret void
}

define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK-LABEL: add4i64a16:
; CHECK: # BB#0:
; CHECK-NEXT: vmovaps (%rsi), %xmm0
; CHECK-NEXT: vmovaps 16(%rsi), %xmm1
; CHECK-NEXT: vmovaps %xmm1, 16(%rdi)
; CHECK-NEXT: vmovaps %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: add4i64a16:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: vmovdqa (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1
; CHECK_O0-NEXT: # implicit-def: %ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
  %b = load <4 x i64>, <4 x i64>* %bp, align 16
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 16