; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
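
; Check that nontemporal stores of vector and scalar values (marked with
; !nontemporal metadata) are lowered to movnt* instructions on SSE2 and AVX,
; for both 32-bit and 64-bit targets, and that the loads of %loadptr
; interleaved between the stores are preserved.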
define i32 @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4 x i32> %F, <8 x i16> %G, <16 x i8> %H, i64 %I, i32* %loadptr) nounwind {
; X32-SSE-LABEL: f:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    pushl %ebp
; X32-SSE-NEXT:    movl %esp, %ebp
; X32-SSE-NEXT:    pushl %esi
; X32-SSE-NEXT:    andl $-16, %esp
; X32-SSE-NEXT:    subl $16, %esp
; X32-SSE-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
; X32-SSE-NEXT:    movl 12(%ebp), %ecx
; X32-SSE-NEXT:    movdqa 56(%ebp), %xmm4
; X32-SSE-NEXT:    movdqa 40(%ebp), %xmm5
; X32-SSE-NEXT:    movdqa 24(%ebp), %xmm6
; X32-SSE-NEXT:    movl 8(%ebp), %esi
; X32-SSE-NEXT:    movl 80(%ebp), %edx
; X32-SSE-NEXT:    movl (%edx), %eax
; X32-SSE-NEXT:    addps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT:    movntps %xmm0, (%esi)
; X32-SSE-NEXT:    paddq {{\.LCPI.*}}, %xmm2
; X32-SSE-NEXT:    addl (%edx), %eax
; X32-SSE-NEXT:    movntdq %xmm2, (%esi)
; X32-SSE-NEXT:    addpd {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT:    addl (%edx), %eax
; X32-SSE-NEXT:    movntpd %xmm1, (%esi)
; X32-SSE-NEXT:    paddd {{\.LCPI.*}}, %xmm6
; X32-SSE-NEXT:    addl (%edx), %eax
; X32-SSE-NEXT:    movntdq %xmm6, (%esi)
; X32-SSE-NEXT:    paddw {{\.LCPI.*}}, %xmm5
; X32-SSE-NEXT:    addl (%edx), %eax
; X32-SSE-NEXT:    movntdq %xmm5, (%esi)
; X32-SSE-NEXT:    paddb {{\.LCPI.*}}, %xmm4
; X32-SSE-NEXT:    addl (%edx), %eax
; X32-SSE-NEXT:    movntdq %xmm4, (%esi)
; X32-SSE-NEXT:    addl (%edx), %eax
; X32-SSE-NEXT:    movntil %ecx, (%esi)
; X32-SSE-NEXT:    addl (%edx), %eax
; X32-SSE-NEXT:    movsd %xmm3, (%esi)
; X32-SSE-NEXT:    addl (%edx), %eax
; X32-SSE-NEXT:    leal -4(%ebp), %esp
; X32-SSE-NEXT:    popl %esi
; X32-SSE-NEXT:    popl %ebp
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: f:
; X32-AVX:       # %bb.0:
; X32-AVX-NEXT:    pushl %ebp
; X32-AVX-NEXT:    movl %esp, %ebp
; X32-AVX-NEXT:    pushl %esi
; X32-AVX-NEXT:    andl $-16, %esp
; X32-AVX-NEXT:    subl $16, %esp
; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
; X32-AVX-NEXT:    movl 12(%ebp), %ecx
; X32-AVX-NEXT:    vmovdqa 56(%ebp), %xmm4
; X32-AVX-NEXT:    vmovdqa 40(%ebp), %xmm5
; X32-AVX-NEXT:    vmovdqa 24(%ebp), %xmm6
; X32-AVX-NEXT:    movl 8(%ebp), %edx
; X32-AVX-NEXT:    movl 80(%ebp), %esi
; X32-AVX-NEXT:    movl (%esi), %eax
; X32-AVX-NEXT:    vaddps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-AVX-NEXT:    vmovntps %xmm0, (%edx)
; X32-AVX-NEXT:    vpaddq {{\.LCPI.*}}, %xmm2, %xmm0
; X32-AVX-NEXT:    addl (%esi), %eax
; X32-AVX-NEXT:    vmovntdq %xmm0, (%edx)
; X32-AVX-NEXT:    vaddpd {{\.LCPI.*}}, %xmm1, %xmm0
; X32-AVX-NEXT:    addl (%esi), %eax
; X32-AVX-NEXT:    vmovntpd %xmm0, (%edx)
; X32-AVX-NEXT:    vpaddd {{\.LCPI.*}}, %xmm6, %xmm0
; X32-AVX-NEXT:    addl (%esi), %eax
; X32-AVX-NEXT:    vmovntdq %xmm0, (%edx)
; X32-AVX-NEXT:    vpaddw {{\.LCPI.*}}, %xmm5, %xmm0
; X32-AVX-NEXT:    addl (%esi), %eax
; X32-AVX-NEXT:    vmovntdq %xmm0, (%edx)
; X32-AVX-NEXT:    vpaddb {{\.LCPI.*}}, %xmm4, %xmm0
; X32-AVX-NEXT:    addl (%esi), %eax
; X32-AVX-NEXT:    vmovntdq %xmm0, (%edx)
; X32-AVX-NEXT:    addl (%esi), %eax
; X32-AVX-NEXT:    movntil %ecx, (%edx)
; X32-AVX-NEXT:    addl (%esi), %eax
; X32-AVX-NEXT:    vmovsd %xmm3, (%edx)
; X32-AVX-NEXT:    addl (%esi), %eax
; X32-AVX-NEXT:    leal -4(%ebp), %esp
; X32-AVX-NEXT:    popl %esi
; X32-AVX-NEXT:    popl %ebp
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: f:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movl (%rcx), %eax
; X64-SSE-NEXT:    addps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT:    movntps %xmm0, (%rdi)
; X64-SSE-NEXT:    paddq {{.*}}(%rip), %xmm2
; X64-SSE-NEXT:    addl (%rcx), %eax
; X64-SSE-NEXT:    movntdq %xmm2, (%rdi)
; X64-SSE-NEXT:    addpd {{.*}}(%rip), %xmm1
; X64-SSE-NEXT:    addl (%rcx), %eax
; X64-SSE-NEXT:    movntpd %xmm1, (%rdi)
; X64-SSE-NEXT:    paddd {{.*}}(%rip), %xmm3
; X64-SSE-NEXT:    addl (%rcx), %eax
; X64-SSE-NEXT:    movntdq %xmm3, (%rdi)
; X64-SSE-NEXT:    paddw {{.*}}(%rip), %xmm4
; X64-SSE-NEXT:    addl (%rcx), %eax
; X64-SSE-NEXT:    movntdq %xmm4, (%rdi)
; X64-SSE-NEXT:    paddb {{.*}}(%rip), %xmm5
; X64-SSE-NEXT:    addl (%rcx), %eax
; X64-SSE-NEXT:    movntdq %xmm5, (%rdi)
; X64-SSE-NEXT:    addl (%rcx), %eax
; X64-SSE-NEXT:    movntil %esi, (%rdi)
; X64-SSE-NEXT:    addl (%rcx), %eax
; X64-SSE-NEXT:    movntiq %rdx, (%rdi)
; X64-SSE-NEXT:    addl (%rcx), %eax
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: f:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    movl (%rcx), %eax
; X64-AVX-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vmovntps %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddq {{.*}}(%rip), %xmm2, %xmm0
; X64-AVX-NEXT:    addl (%rcx), %eax
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    vaddpd {{.*}}(%rip), %xmm1, %xmm0
; X64-AVX-NEXT:    addl (%rcx), %eax
; X64-AVX-NEXT:    vmovntpd %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm3, %xmm0
; X64-AVX-NEXT:    addl (%rcx), %eax
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddw {{.*}}(%rip), %xmm4, %xmm0
; X64-AVX-NEXT:    addl (%rcx), %eax
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddb {{.*}}(%rip), %xmm5, %xmm0
; X64-AVX-NEXT:    addl (%rcx), %eax
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    addl (%rcx), %eax
; X64-AVX-NEXT:    movntil %esi, (%rdi)
; X64-AVX-NEXT:    addl (%rcx), %eax
; X64-AVX-NEXT:    movntiq %rdx, (%rdi)
; X64-AVX-NEXT:    addl (%rcx), %eax
; X64-AVX-NEXT:    retq
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <4 x float>*
  %A2 = fadd <4 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0>
  store <4 x float> %A2, <4 x float>* %cast, align 16, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <2 x i64>*
  %E2 = add <2 x i64> %E, <i64 1, i64 2>
  store <2 x i64> %E2, <2 x i64>* %cast1, align 16, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <2 x double>*
  %C2 = fadd <2 x double> %C, <double 1.0, double 2.0>
  store <2 x double> %C2, <2 x double>* %cast2, align 16, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %cast3 = bitcast i8* %B to <4 x i32>*
  %F2 = add <4 x i32> %F, <i32 1, i32 2, i32 3, i32 4>
  store <4 x i32> %F2, <4 x i32>* %cast3, align 16, !nontemporal !0
  %v4 = load i32, i32* %loadptr, align 1
  %cast4 = bitcast i8* %B to <8 x i16>*
  %G2 = add <8 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  store <8 x i16> %G2, <8 x i16>* %cast4, align 16, !nontemporal !0
  %v5 = load i32, i32* %loadptr, align 1
  %cast5 = bitcast i8* %B to <16 x i8>*
  %H2 = add <16 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
  store <16 x i8> %H2, <16 x i8>* %cast5, align 16, !nontemporal !0
  %v6 = load i32, i32* %loadptr, align 1
  %cast6 = bitcast i8* %B to i32*
  store i32 %D, i32* %cast6, align 1, !nontemporal !0
  %v7 = load i32, i32* %loadptr, align 1
  %cast7 = bitcast i8* %B to i64*
  store i64 %I, i64* %cast7, align 1, !nontemporal !0
  %v8 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  %sum4 = add i32 %sum3, %v4
  %sum5 = add i32 %sum4, %v5
  %sum6 = add i32 %sum5, %v6
  %sum7 = add i32 %sum6, %v7
  %sum8 = add i32 %sum7, %v8
  ret i32 %sum8
}

!0 = !{i32 1}