; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s
; RUN: llc -O0 < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s -check-prefix=CHECK_O0
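; The 'slow-unaligned-mem-32' attribute (FeatureSlowUAMem32) marks unaligned
; 32-byte memory accesses as slow, so 32-byte loads/stores without known
; 32-byte alignment are expected to be split into two 16-byte operations
; (see the add* tests at the bottom of this file).
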
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind {
; CHECK-LABEL: test_256_load:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %r15
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $96, %rsp
; CHECK-NEXT: movq %rdx, %r14
; CHECK-NEXT: movq %rsi, %r15
; CHECK-NEXT: movq %rdi, %rbx
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT: vmovaps (%rsi), %ymm1
; CHECK-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT: vmovaps (%rdx), %ymm2
; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
; CHECK-NEXT: callq dummy
; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT: vmovaps %ymm0, (%rbx)
; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT: vmovaps %ymm0, (%r15)
; CHECK-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT: vmovaps %ymm0, (%r14)
; CHECK-NEXT: addq $96, %rsp
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
; CHECK-NEXT: popq %r15
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: test_256_load:
; CHECK_O0: # %bb.0: # %entry
; CHECK_O0-NEXT: subq $152, %rsp
; CHECK_O0-NEXT: vmovapd (%rdi), %ymm0
; CHECK_O0-NEXT: vmovaps (%rsi), %ymm1
; CHECK_O0-NEXT: vmovdqa (%rdx), %ymm2
; CHECK_O0-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK_O0-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK_O0-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK_O0-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT: callq dummy
; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; CHECK_O0-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK_O0-NEXT: vmovapd %ymm0, (%rax)
; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; CHECK_O0-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; CHECK_O0-NEXT: vmovaps %ymm1, (%rcx)
; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; CHECK_O0-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; CHECK_O0-NEXT: vmovdqa %ymm2, (%rdx)
; CHECK_O0-NEXT: addq $152, %rsp
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
entry:
  %0 = bitcast double* %d to <4 x double>*
  %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
  %1 = bitcast float* %f to <8 x float>*
  %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32
  %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32
  tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
  store <4 x double> %tmp1.i, <4 x double>* %0, align 32
  store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
  store <4 x i64> %tmp1.i16, <4 x i64>* %i, align 32
  ret void
}

declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)

;;
;; The two tests below check that load + scalar_to_vector + ins_subvec + zext
;; must be folded into a single vmovss, vmovsd, or vinsertps from memory.
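;; That is, inserting a loaded scalar into lane 0 of a zero vector should
;; lower to one zero-extending scalar load (note the "xmm0 = mem[0],zero,..."
;; patterns below), with no separate zeroing or insert instructions.
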
define <8 x float> @mov00(<8 x float> %v, float* %ptr) nounwind {
; CHECK-LABEL: mov00:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: mov00:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK_O0-NEXT: # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT: retq
  %val = load float, float* %ptr
  %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
  ret <8 x float> %i0
}

define <4 x double> @mov01(<4 x double> %v, double* %ptr) nounwind {
; CHECK-LABEL: mov01:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: mov01:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK_O0-NEXT: # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT: retq
  %val = load double, double* %ptr
  %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
  ret <4 x double> %i0
}

define void @storev16i16(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
  store <16 x i16> %a, <16 x i16>* undef, align 32
  unreachable
}

define void @storev16i16_01(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16_01:
; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT: vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16_01:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
  store <16 x i16> %a, <16 x i16>* undef, align 4
  unreachable
}

define void @storev32i8(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
  store <32 x i8> %a, <32 x i8>* undef, align 32
  unreachable
}

define void @storev32i8_01(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8_01:
; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT: vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8_01:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
  store <32 x i8> %a, <32 x i8>* undef, align 4
  unreachable
}

; It is faster to make two 16-byte saves if the data is already in xmm
; registers, for example after an integer operation.
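; Under slow-unaligned-mem-32, the 16-byte-aligned 32-byte store below is
; expected to be emitted as two aligned xmm stores rather than a vinsertf128
; followed by a single ymm store.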
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
; CHECK-LABEL: double_save:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm1, 16(%rdi)
; CHECK-NEXT: vmovaps %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: double_save:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i32> %Z, <8 x i32>* %P, align 16
  ret void
}

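; A volatile 32-byte store must not be split into two 16-byte stores, so in
; the variant below the xmm halves are recombined with vinsertf128 and stored
; with a single unaligned ymm move.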
define void @double_save_volatile(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind {
; CHECK-LABEL: double_save_volatile:
; CHECK: # %bb.0:
; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: vmovups %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: double_save_volatile:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store volatile <8 x i32> %Z, <8 x i32>* %P, align 16
  ret void
}

declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind

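; Check that the masked-store intrinsic still selects vmaskmovps (with the
; <-1,0,...,0> mask materialized via vmovd) even though the pointer, the
; stored data, and the branch conditions are all undef.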
define void @f_f() nounwind {
; CHECK-LABEL: f_f:
; CHECK: # %bb.0: # %allocas
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB9_2
; CHECK-NEXT: # %bb.1: # %cif_mask_all
; CHECK-NEXT: .LBB9_2: # %cif_mask_mixed
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB9_4
; CHECK-NEXT: # %bb.3: # %cif_mixed_test_all
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: vmovd %eax, %xmm0
; CHECK-NEXT: vmaskmovps %ymm0, %ymm0, (%rax)
; CHECK-NEXT: .LBB9_4: # %cif_mixed_test_any_check
;
; CHECK_O0-LABEL: f_f:
; CHECK_O0: # %bb.0: # %allocas
; CHECK_O0-NEXT: # implicit-def: $al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB9_1
; CHECK_O0-NEXT: jmp .LBB9_2
; CHECK_O0-NEXT: .LBB9_1: # %cif_mask_all
; CHECK_O0-NEXT: .LBB9_2: # %cif_mask_mixed
; CHECK_O0-NEXT: # implicit-def: $al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB9_3
; CHECK_O0-NEXT: jmp .LBB9_4
; CHECK_O0-NEXT: .LBB9_3: # %cif_mixed_test_all
; CHECK_O0-NEXT: movl $-1, %eax
; CHECK_O0-NEXT: vmovd %eax, %xmm0
; CHECK_O0-NEXT: vmovdqa %xmm0, %xmm0
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: # implicit-def: $rcx
; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmaskmovps %ymm2, %ymm1, (%rcx)
; CHECK_O0-NEXT: .LBB9_4: # %cif_mixed_test_any_check
allocas:
  br i1 undef, label %cif_mask_all, label %cif_mask_mixed

cif_mask_all:
  unreachable

cif_mask_mixed:
  br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check

cif_mixed_test_all:
  call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x i32> <i32 -1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <8 x float> undef) nounwind
  unreachable

cif_mixed_test_any_check:
  unreachable
}

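; 32-byte accesses with only 1-byte alignment are split into two unaligned
; 16-byte operations.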
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
; CHECK-LABEL: add8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups (%rsi), %xmm0
; CHECK-NEXT: vmovups 16(%rsi), %xmm1
; CHECK-NEXT: vmovups %xmm1, 16(%rdi)
; CHECK-NEXT: vmovups %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: add8i32:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovdqu (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1
; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
  %b = load <8 x i32>, <8 x i32>* %bp, align 1
  %x = add <8 x i32> zeroinitializer, %b
  store <8 x i32> %x, <8 x i32>* %ret, align 1
  ret void
}

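; With 64-byte alignment, the 32-byte access stays a single aligned ymm
; load/store.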
define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK-LABEL: add4i64a64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rsi), %ymm0
; CHECK-NEXT: vmovaps %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: add4i64a64:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovaps (%rsi), %ymm0
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rdi)
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
  %b = load <4 x i64>, <4 x i64>* %bp, align 64
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 64
  ret void
}

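; 16-byte alignment is not enough to avoid the split: the access is emitted
; as two aligned xmm halves.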
define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK-LABEL: add4i64a16:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rsi), %xmm0
; CHECK-NEXT: vmovaps 16(%rsi), %xmm1
; CHECK-NEXT: vmovaps %xmm1, 16(%rdi)
; CHECK-NEXT: vmovaps %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: add4i64a16:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovdqa (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1
; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
  %b = load <4 x i64>, <4 x i64>* %bp, align 16
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 16
  ret void
}