; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx --show-mc-encoding | FileCheck %s
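
; Check that nontemporal vector stores are emitted as vmovntps/vmovntdq/vmovntpd
; with EVEX-to-VEX compressed encodings on SKX, and that the scalar loads
; interleaved between the stores survive and feed the returned sum.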
define i32 @f256(<8 x float> %A, <8 x float> %AA, i8* %B, <4 x double> %C, <4 x double> %CC, i32 %D, <4 x i64> %E, <4 x i64> %EE, i32* %loadptr) {
; CHECK-LABEL: f256:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movl (%rdx), %eax ## encoding: [0x8b,0x02]
; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT:    vmovntps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x07]
; CHECK-NEXT:    vpaddq %ymm5, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xd4,0xc5]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vmovntdq %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x07]
; CHECK-NEXT:    vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vmovntpd %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x07]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <8 x float>*
  %A2 = fadd <8 x float> %A, %AA
  store <8 x float> %A2, <8 x float>* %cast, align 64, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <4 x i64>*
  %E2 = add <4 x i64> %E, %EE
  store <4 x i64> %E2, <4 x i64>* %cast1, align 64, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <4 x double>*
  %C2 = fadd <4 x double> %C, %CC
  store <4 x double> %C2, <4 x double>* %cast2, align 64, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  ret i32 %sum3
}
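
; The 128-bit variant follows the same pattern; no vzeroupper is emitted here
; since only XMM registers are used.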
define i32 @f128(<4 x float> %A, <4 x float> %AA, i8* %B, <2 x double> %C, <2 x double> %CC, i32 %D, <2 x i64> %E, <2 x i64> %EE, i32* %loadptr) {
; CHECK-LABEL: f128:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movl (%rdx), %eax ## encoding: [0x8b,0x02]
; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT:    vmovntps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x07]
; CHECK-NEXT:    vpaddq %xmm5, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xd4,0xc5]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vmovntdq %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe7,0x07]
; CHECK-NEXT:    vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vmovntpd %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2b,0x07]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <4 x float>*
  %A2 = fadd <4 x float> %A, %AA
  store <4 x float> %A2, <4 x float>* %cast, align 64, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <2 x i64>*
  %E2 = add <2 x i64> %E, %EE
  store <2 x i64> %E2, <2 x i64>* %cast1, align 64, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <2 x double>*
  %C2 = fadd <2 x double> %C, %CC
  store <2 x double> %C2, <2 x double>* %cast2, align 64, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  ret i32 %sum3
}

; !nontemporal metadata must be a single i32 1.
!0 = !{i32 1}