; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE
; RUN: llc < %s -mtriple=i686-linux -mattr=+avx | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX1
; RUN: llc < %s -mtriple=i686-linux -mattr=+avx2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX2
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX2
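
; A load of the illegal vector type <7 x i64> must be widened and then split
; into legal loads; the result is returned indirectly through the sret slot.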
define <7 x i64> @load7_aligned(<7 x i64>* %x) {
; X86-SSE-LABEL: load7_aligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movaps (%ecx), %xmm0
; X86-SSE-NEXT:    movaps 16(%ecx), %xmm1
; X86-SSE-NEXT:    movaps 32(%ecx), %xmm2
; X86-SSE-NEXT:    movl 48(%ecx), %edx
; X86-SSE-NEXT:    movl 52(%ecx), %ecx
; X86-SSE-NEXT:    movl %ecx, 52(%eax)
; X86-SSE-NEXT:    movl %edx, 48(%eax)
; X86-SSE-NEXT:    movaps %xmm2, 32(%eax)
; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    retl $4
;
; X86-AVX-LABEL: load7_aligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT:    vmovaps (%ecx), %ymm0
; X86-AVX-NEXT:    vmovaps 48(%ecx), %xmm1
; X86-AVX-NEXT:    vextractps $1, %xmm1, 52(%eax)
; X86-AVX-NEXT:    vmovss %xmm1, 48(%eax)
; X86-AVX-NEXT:    vmovaps 32(%ecx), %xmm1
; X86-AVX-NEXT:    vmovaps %xmm1, 32(%eax)
; X86-AVX-NEXT:    vmovaps %ymm0, (%eax)
; X86-AVX-NEXT:    vzeroupper
; X86-AVX-NEXT:    retl $4
;
; X64-SSE-LABEL: load7_aligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %rdi, %rax
; X64-SSE-NEXT:    movaps (%rsi), %xmm0
; X64-SSE-NEXT:    movaps 16(%rsi), %xmm1
; X64-SSE-NEXT:    movaps 32(%rsi), %xmm2
; X64-SSE-NEXT:    movq 48(%rsi), %rcx
; X64-SSE-NEXT:    movq %rcx, 48(%rdi)
; X64-SSE-NEXT:    movaps %xmm2, 32(%rdi)
; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: load7_aligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    movq %rdi, %rax
; X64-AVX-NEXT:    vmovaps (%rsi), %ymm0
; X64-AVX-NEXT:    movq 48(%rsi), %rcx
; X64-AVX-NEXT:    movq %rcx, 48(%rdi)
; X64-AVX-NEXT:    vmovaps 32(%rsi), %xmm1
; X64-AVX-NEXT:    vmovaps %xmm1, 32(%rdi)
; X64-AVX-NEXT:    vmovaps %ymm0, (%rdi)
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %x1 = load <7 x i64>, <7 x i64>* %x
  ret <7 x i64> %x1
}
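
; Same as above, but the source is only 1-byte aligned, so unaligned vector
; loads are used and the 8-byte tail is handled with scalar loads.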
define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
; X86-SSE-LABEL: load7_unaligned:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movups (%ecx), %xmm0
; X86-SSE-NEXT:    movups 16(%ecx), %xmm1
; X86-SSE-NEXT:    movups 32(%ecx), %xmm2
; X86-SSE-NEXT:    movl 48(%ecx), %edx
; X86-SSE-NEXT:    movl 52(%ecx), %ecx
; X86-SSE-NEXT:    movl %ecx, 52(%eax)
; X86-SSE-NEXT:    movl %edx, 48(%eax)
; X86-SSE-NEXT:    movaps %xmm2, 32(%eax)
; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    retl $4
;
; X86-AVX-LABEL: load7_unaligned:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT:    vmovups (%ecx), %ymm0
; X86-AVX-NEXT:    vmovups 32(%ecx), %xmm1
; X86-AVX-NEXT:    movl 48(%ecx), %edx
; X86-AVX-NEXT:    movl 52(%ecx), %ecx
; X86-AVX-NEXT:    movl %ecx, 52(%eax)
; X86-AVX-NEXT:    movl %edx, 48(%eax)
; X86-AVX-NEXT:    vmovaps %xmm1, 32(%eax)
; X86-AVX-NEXT:    vmovaps %ymm0, (%eax)
; X86-AVX-NEXT:    vzeroupper
; X86-AVX-NEXT:    retl $4
;
; X64-SSE-LABEL: load7_unaligned:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %rdi, %rax
; X64-SSE-NEXT:    movups (%rsi), %xmm0
; X64-SSE-NEXT:    movups 16(%rsi), %xmm1
; X64-SSE-NEXT:    movups 32(%rsi), %xmm2
; X64-SSE-NEXT:    movq 48(%rsi), %rcx
; X64-SSE-NEXT:    movq %rcx, 48(%rdi)
; X64-SSE-NEXT:    movaps %xmm2, 32(%rdi)
; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: load7_unaligned:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    movq %rdi, %rax
; X64-AVX-NEXT:    vmovups (%rsi), %ymm0
; X64-AVX-NEXT:    vmovups 32(%rsi), %xmm1
; X64-AVX-NEXT:    movq 48(%rsi), %rcx
; X64-AVX-NEXT:    movq %rcx, 48(%rdi)
; X64-AVX-NEXT:    vmovaps %xmm1, 32(%rdi)
; X64-AVX-NEXT:    vmovaps %ymm0, (%rdi)
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %x1 = load <7 x i64>, <7 x i64>* %x, align 1
  ret <7 x i64> %x1
}

; PR42305 - https://bugs.llvm.org/show_bug.cgi?id=42305
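; On AVX the 32-byte source should be loaded once as a ymm register, with the
; upper half stored via vextractf128; SSE splits it into two xmm loads.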
define void @load_split(<8 x float>* %ld, <4 x float>* %st1, <4 x float>* %st2) nounwind {
; X86-SSE-LABEL: load_split:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT:    movups (%edx), %xmm0
; X86-SSE-NEXT:    movups 16(%edx), %xmm1
; X86-SSE-NEXT:    movups %xmm0, (%ecx)
; X86-SSE-NEXT:    movups %xmm1, (%eax)
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: load_split:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT:    vmovups (%edx), %ymm0
; X86-AVX-NEXT:    vmovups %xmm0, (%ecx)
; X86-AVX-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X86-AVX-NEXT:    vzeroupper
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: load_split:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups (%rdi), %xmm0
; X64-SSE-NEXT:    movups 16(%rdi), %xmm1
; X64-SSE-NEXT:    movups %xmm0, (%rsi)
; X64-SSE-NEXT:    movups %xmm1, (%rdx)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: load_split:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovups (%rdi), %ymm0
; X64-AVX-NEXT:    vmovups %xmm0, (%rsi)
; X64-AVX-NEXT:    vextractf128 $1, %ymm0, (%rdx)
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %t256 = load <8 x float>, <8 x float>* %ld, align 1
  %b128 = shufflevector <8 x float> %t256, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  store <4 x float> %b128, <4 x float>* %st1, align 1
  %t128 = shufflevector <8 x float> %t256, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  store <4 x float> %t128, <4 x float>* %st2, align 1
  ret void
}
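
; The same split-load pattern, but the two store addresses are computed from
; values loaded out of a separate index array.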
define void @load_split_more(float* %src, i32* %idx, float* %dst) nounwind {
; X86-SSE-LABEL: load_split_more:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT:    movups (%edx), %xmm0
; X86-SSE-NEXT:    movups 16(%edx), %xmm1
; X86-SSE-NEXT:    movl (%ecx), %edx
; X86-SSE-NEXT:    movups %xmm0, (%eax,%edx,4)
; X86-SSE-NEXT:    movl 4(%ecx), %ecx
; X86-SSE-NEXT:    movups %xmm1, (%eax,%ecx,4)
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: load_split_more:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT:    vmovups (%edx), %ymm0
; X86-AVX-NEXT:    movl (%ecx), %edx
; X86-AVX-NEXT:    vmovups %xmm0, (%eax,%edx,4)
; X86-AVX-NEXT:    movl 4(%ecx), %ecx
; X86-AVX-NEXT:    vextractf128 $1, %ymm0, (%eax,%ecx,4)
; X86-AVX-NEXT:    vzeroupper
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: load_split_more:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups (%rdi), %xmm0
; X64-SSE-NEXT:    movups 16(%rdi), %xmm1
; X64-SSE-NEXT:    movslq (%rsi), %rax
; X64-SSE-NEXT:    movups %xmm0, (%rdx,%rax,4)
; X64-SSE-NEXT:    movslq 4(%rsi), %rax
; X64-SSE-NEXT:    movups %xmm1, (%rdx,%rax,4)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: load_split_more:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovups (%rdi), %ymm0
; X64-AVX-NEXT:    movslq (%rsi), %rax
; X64-AVX-NEXT:    vmovups %xmm0, (%rdx,%rax,4)
; X64-AVX-NEXT:    movslq 4(%rsi), %rax
; X64-AVX-NEXT:    vextractf128 $1, %ymm0, (%rdx,%rax,4)
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %v.i = bitcast float* %src to <8 x float>*
  %tmp = load <8 x float>, <8 x float>* %v.i, align 1
  %tmp1 = load i32, i32* %idx, align 4
  %idx.ext = sext i32 %tmp1 to i64
  %add.ptr1 = getelementptr inbounds float, float* %dst, i64 %idx.ext
  %extract = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %v.i11 = bitcast float* %add.ptr1 to <4 x float>*
  store <4 x float> %extract, <4 x float>* %v.i11, align 1
  %arrayidx2 = getelementptr inbounds i32, i32* %idx, i64 1
  %tmp2 = load i32, i32* %arrayidx2, align 4
  %idx.ext3 = sext i32 %tmp2 to i64
  %add.ptr4 = getelementptr inbounds float, float* %dst, i64 %idx.ext3
  %extract5 = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %v.i10 = bitcast float* %add.ptr4 to <4 x float>*
  store <4 x float> %extract5, <4 x float>* %v.i10, align 1
  ret void
}