; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512bw | FileCheck %s
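
; Verify that nontemporal stores of 512-bit vectors select vmovntps, vmovntpd,
; and vmovntdq under AVX-512 (avx512bw is needed for the i16/i8 element types),
; and that the scalar loads from %loadptr interleaved between the stores fold
; into the i32 return value.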
define i32 @f(<16 x float> %A, <16 x float> %AA, i8* %B, <8 x double> %C, <8 x double> %CC, <8 x i64> %E, <8 x i64> %EE, <16 x i32> %F, <16 x i32> %FF, <32 x i16> %G, <32 x i16> %GG, <64 x i8> %H, <64 x i8> %HH, i32* %loadptr) {
; CHECK-LABEL: f:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbp, -16
; CHECK-NEXT:    movq %rsp, %rbp
; CHECK-NEXT:    .cfi_def_cfa_register %rbp
; CHECK-NEXT:    andq $-64, %rsp
; CHECK-NEXT:    subq $64, %rsp
; CHECK-NEXT:    vmovdqa64 144(%rbp), %zmm8
; CHECK-NEXT:    vmovdqa64 16(%rbp), %zmm9
; CHECK-NEXT:    movl (%rsi), %eax
; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    vmovntps %zmm0, (%rdi)
; CHECK-NEXT:    vpaddq %zmm5, %zmm4, %zmm0
; CHECK-NEXT:    addl (%rsi), %eax
; CHECK-NEXT:    vmovntdq %zmm0, (%rdi)
; CHECK-NEXT:    vaddpd %zmm3, %zmm2, %zmm0
; CHECK-NEXT:    addl (%rsi), %eax
; CHECK-NEXT:    vmovntpd %zmm0, (%rdi)
; CHECK-NEXT:    vpaddd %zmm7, %zmm6, %zmm0
; CHECK-NEXT:    addl (%rsi), %eax
; CHECK-NEXT:    vmovntdq %zmm0, (%rdi)
; CHECK-NEXT:    vpaddw 80(%rbp), %zmm9, %zmm0
; CHECK-NEXT:    addl (%rsi), %eax
; CHECK-NEXT:    vmovntdq %zmm0, (%rdi)
; CHECK-NEXT:    vpaddb 208(%rbp), %zmm8, %zmm0
; CHECK-NEXT:    addl (%rsi), %eax
; CHECK-NEXT:    vmovntdq %zmm0, (%rdi)
; CHECK-NEXT:    addl (%rsi), %eax
; CHECK-NEXT:    movq %rbp, %rsp
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    .cfi_def_cfa %rsp, 8
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
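; A scalar load of %loadptr follows every vector store; the adds at the end
; fold those loads into the return value so none of them is dead.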
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <16 x float>*
  %A2 = fadd <16 x float> %A, %AA
  store <16 x float> %A2, <16 x float>* %cast, align 64, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <8 x i64>*
  %E2 = add <8 x i64> %E, %EE
  store <8 x i64> %E2, <8 x i64>* %cast1, align 64, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <8 x double>*
  %C2 = fadd <8 x double> %C, %CC
  store <8 x double> %C2, <8 x double>* %cast2, align 64, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %cast3 = bitcast i8* %B to <16 x i32>*
  %F2 = add <16 x i32> %F, %FF
  store <16 x i32> %F2, <16 x i32>* %cast3, align 64, !nontemporal !0
  %v4 = load i32, i32* %loadptr, align 1
  %cast4 = bitcast i8* %B to <32 x i16>*
  %G2 = add <32 x i16> %G, %GG
  store <32 x i16> %G2, <32 x i16>* %cast4, align 64, !nontemporal !0
  %v5 = load i32, i32* %loadptr, align 1
  %cast5 = bitcast i8* %B to <64 x i8>*
  %H2 = add <64 x i8> %H, %HH
  store <64 x i8> %H2, <64 x i8>* %cast5, align 64, !nontemporal !0
  %v6 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  %sum4 = add i32 %sum3, %v4
  %sum5 = add i32 %sum4, %v5
  %sum6 = add i32 %sum5, %v6
  ret i32 %sum6
}

!0 = !{i32 1}