; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
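
; Check that nontemporal stores of vector and scalar values are selected to the
; corresponding non-temporal move instructions (movntps/movntpd/movntdq/movnti).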
define void @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4 x i32> %F, <8 x i16> %G, <16 x i8> %H, i64 %I) nounwind {
; X32-SSE-LABEL: f:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    pushl %ebp
; X32-SSE-NEXT:    movl %esp, %ebp
; X32-SSE-NEXT:    andl $-16, %esp
; X32-SSE-NEXT:    subl $16, %esp
; X32-SSE-NEXT:    movl 72(%ebp), %eax
; X32-SSE-NEXT:    movl 76(%ebp), %ecx
; X32-SSE-NEXT:    movdqa 56(%ebp), %xmm3
; X32-SSE-NEXT:    movdqa 40(%ebp), %xmm4
; X32-SSE-NEXT:    movdqa 24(%ebp), %xmm5
; X32-SSE-NEXT:    movl 8(%ebp), %edx
; X32-SSE-NEXT:    addps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT:    movntps %xmm0, (%edx)
; X32-SSE-NEXT:    paddq {{\.LCPI.*}}, %xmm2
; X32-SSE-NEXT:    movntdq %xmm2, (%edx)
; X32-SSE-NEXT:    addpd {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT:    movntpd %xmm1, (%edx)
; X32-SSE-NEXT:    paddd {{\.LCPI.*}}, %xmm5
; X32-SSE-NEXT:    movntdq %xmm5, (%edx)
; X32-SSE-NEXT:    paddw {{\.LCPI.*}}, %xmm4
; X32-SSE-NEXT:    movntdq %xmm4, (%edx)
; X32-SSE-NEXT:    paddb {{\.LCPI.*}}, %xmm3
; X32-SSE-NEXT:    movntdq %xmm3, (%edx)
; X32-SSE-NEXT:    movntil %ecx, 4(%edx)
; X32-SSE-NEXT:    movntil %eax, (%edx)
; X32-SSE-NEXT:    movl %ebp, %esp
; X32-SSE-NEXT:    popl %ebp
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: f:
; X32-AVX:       # %bb.0:
; X32-AVX-NEXT:    pushl %ebp
; X32-AVX-NEXT:    movl %esp, %ebp
; X32-AVX-NEXT:    andl $-16, %esp
; X32-AVX-NEXT:    subl $16, %esp
; X32-AVX-NEXT:    movl 72(%ebp), %eax
; X32-AVX-NEXT:    movl 76(%ebp), %ecx
; X32-AVX-NEXT:    vmovdqa 56(%ebp), %xmm3
; X32-AVX-NEXT:    vmovdqa 40(%ebp), %xmm4
; X32-AVX-NEXT:    vmovdqa 24(%ebp), %xmm5
; X32-AVX-NEXT:    movl 8(%ebp), %edx
; X32-AVX-NEXT:    vaddps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-AVX-NEXT:    vmovntps %xmm0, (%edx)
; X32-AVX-NEXT:    vpaddq {{\.LCPI.*}}, %xmm2, %xmm0
; X32-AVX-NEXT:    vmovntdq %xmm0, (%edx)
; X32-AVX-NEXT:    vaddpd {{\.LCPI.*}}, %xmm1, %xmm0
; X32-AVX-NEXT:    vmovntpd %xmm0, (%edx)
; X32-AVX-NEXT:    vpaddd {{\.LCPI.*}}, %xmm5, %xmm0
; X32-AVX-NEXT:    vmovntdq %xmm0, (%edx)
; X32-AVX-NEXT:    vpaddw {{\.LCPI.*}}, %xmm4, %xmm0
; X32-AVX-NEXT:    vmovntdq %xmm0, (%edx)
; X32-AVX-NEXT:    vpaddb {{\.LCPI.*}}, %xmm3, %xmm0
; X32-AVX-NEXT:    vmovntdq %xmm0, (%edx)
; X32-AVX-NEXT:    movntil %ecx, 4(%edx)
; X32-AVX-NEXT:    movntil %eax, (%edx)
; X32-AVX-NEXT:    movl %ebp, %esp
; X32-AVX-NEXT:    popl %ebp
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: f:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    addps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT:    movntps %xmm0, (%rdi)
; X64-SSE-NEXT:    paddq {{.*}}(%rip), %xmm2
; X64-SSE-NEXT:    movntdq %xmm2, (%rdi)
; X64-SSE-NEXT:    addpd {{.*}}(%rip), %xmm1
; X64-SSE-NEXT:    movntpd %xmm1, (%rdi)
; X64-SSE-NEXT:    paddd {{.*}}(%rip), %xmm3
; X64-SSE-NEXT:    movntdq %xmm3, (%rdi)
; X64-SSE-NEXT:    paddw {{.*}}(%rip), %xmm4
; X64-SSE-NEXT:    movntdq %xmm4, (%rdi)
; X64-SSE-NEXT:    paddb {{.*}}(%rip), %xmm5
; X64-SSE-NEXT:    movntdq %xmm5, (%rdi)
; X64-SSE-NEXT:    movntil %esi, (%rdi)
; X64-SSE-NEXT:    movntiq %rdx, (%rdi)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: f:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vmovntps %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddq {{.*}}(%rip), %xmm2, %xmm0
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    vaddpd {{.*}}(%rip), %xmm1, %xmm0
; X64-AVX-NEXT:    vmovntpd %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm3, %xmm0
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddw {{.*}}(%rip), %xmm4, %xmm0
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddb {{.*}}(%rip), %xmm5, %xmm0
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    movntil %esi, (%rdi)
; X64-AVX-NEXT:    movntiq %rdx, (%rdi)
; X64-AVX-NEXT:    retq
  %cast = bitcast i8* %B to <4 x float>*
  %A2 = fadd <4 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0>
  store <4 x float> %A2, <4 x float>* %cast, align 16, !nontemporal !0
  %cast1 = bitcast i8* %B to <2 x i64>*
  %E2 = add <2 x i64> %E, <i64 1, i64 2>
  store <2 x i64> %E2, <2 x i64>* %cast1, align 16, !nontemporal !0
  %cast2 = bitcast i8* %B to <2 x double>*
  %C2 = fadd <2 x double> %C, <double 1.0, double 2.0>
  store <2 x double> %C2, <2 x double>* %cast2, align 16, !nontemporal !0
  %cast3 = bitcast i8* %B to <4 x i32>*
  %F2 = add <4 x i32> %F, <i32 1, i32 2, i32 3, i32 4>
  store <4 x i32> %F2, <4 x i32>* %cast3, align 16, !nontemporal !0
  %cast4 = bitcast i8* %B to <8 x i16>*
  %G2 = add <8 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  store <8 x i16> %G2, <8 x i16>* %cast4, align 16, !nontemporal !0
  %cast5 = bitcast i8* %B to <16 x i8>*
  %H2 = add <16 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
  store <16 x i8> %H2, <16 x i8>* %cast5, align 16, !nontemporal !0
  %cast6 = bitcast i8* %B to i32*
  store i32 %D, i32* %cast6, align 1, !nontemporal !0
  %cast7 = bitcast i8* %B to i64*
  store i64 %I, i64* %cast7, align 1, !nontemporal !0
  ret void
}

!0 = !{i32 1}