; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=X64
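
; Test that !nontemporal stores of 256-bit vector types are selected as
; vmovntps / vmovntpd / vmovntdq when AVX2 is available.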
define i32 @f(<8 x float> %A, ptr %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H, ptr %loadptr) nounwind {
; X86-LABEL: f:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-32, %esp
; X86-NEXT:    subl $32, %esp
; X86-NEXT:    vmovdqa 104(%ebp), %ymm3
; X86-NEXT:    vmovdqa 72(%ebp), %ymm4
; X86-NEXT:    vmovdqa 40(%ebp), %ymm5
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movl 136(%ebp), %edx
; X86-NEXT:    movl (%edx), %eax
; X86-NEXT:    vaddps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT:    vmovntps %ymm0, (%ecx)
; X86-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm0
; X86-NEXT:    addl (%edx), %eax
; X86-NEXT:    vmovntdq %ymm0, (%ecx)
; X86-NEXT:    vaddpd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X86-NEXT:    addl (%edx), %eax
; X86-NEXT:    vmovntpd %ymm0, (%ecx)
; X86-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm5, %ymm0
; X86-NEXT:    addl (%edx), %eax
; X86-NEXT:    vmovntdq %ymm0, (%ecx)
; X86-NEXT:    vpaddw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm4, %ymm0
; X86-NEXT:    addl (%edx), %eax
; X86-NEXT:    vmovntdq %ymm0, (%ecx)
; X86-NEXT:    vpaddb {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm0
; X86-NEXT:    addl (%edx), %eax
; X86-NEXT:    vmovntdq %ymm0, (%ecx)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: f:
; X64:       # %bb.0:
; X64-NEXT:    movl (%rsi), %eax
; X64-NEXT:    vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovntps %ymm0, (%rdi)
; X64-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntdq %ymm0, (%rdi)
; X64-NEXT:    vaddpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntpd %ymm0, (%rdi)
; X64-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntdq %ymm0, (%rdi)
; X64-NEXT:    vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntdq %ymm0, (%rdi)
; X64-NEXT:    vpaddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntdq %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %v0 = load i32, ptr %loadptr, align 1
  %A2 = fadd <8 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>
  store <8 x float> %A2, ptr %B, align 32, !nontemporal !0
  %v1 = load i32, ptr %loadptr, align 1
  %E2 = add <4 x i64> %E, <i64 1, i64 2, i64 3, i64 4>
  store <4 x i64> %E2, ptr %B, align 32, !nontemporal !0
  %v2 = load i32, ptr %loadptr, align 1
  %C2 = fadd <4 x double> %C, <double 1.0, double 2.0, double 3.0, double 4.0>
  store <4 x double> %C2, ptr %B, align 32, !nontemporal !0
  %v3 = load i32, ptr %loadptr, align 1
  %F2 = add <8 x i32> %F, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  store <8 x i32> %F2, ptr %B, align 32, !nontemporal !0
  %v4 = load i32, ptr %loadptr, align 1
  %G2 = add <16 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  store <16 x i16> %G2, ptr %B, align 32, !nontemporal !0
  %v5 = load i32, ptr %loadptr, align 1
  %H2 = add <32 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
  store <32 x i8> %H2, ptr %B, align 32, !nontemporal !0
  %v6 = load i32, ptr %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  %sum4 = add i32 %sum3, %v4
  %sum5 = add i32 %sum4, %v5
  %sum6 = add i32 %sum5, %v6
  ret i32 %sum6
}

!0 = !{i32 1}