; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=slp-vectorizer -S | FileCheck %s
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=inject-tli-mappings,slp-vectorizer -vector-library=SVML -S | FileCheck %s --check-prefix=VECLIB
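
; Check that the interleaved sin/sqrt calls are vectorized into <2 x double>
; calls: the sin calls become @llvm.sin.v2f64 by default and are mapped to the
; SVML routine @__svml_sin2 when -vector-library=SVML is used, while the sqrt
; calls stay as @llvm.sqrt.v2f64 in both runs.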

@src = common global [8 x double] zeroinitializer, align 64
@dst = common global [8 x double] zeroinitializer, align 64

declare double @llvm.sqrt.f64(double)
declare double @llvm.sin.f64(double)

define void @test() {
; CHECK-LABEL: @test(
; CHECK-NEXT: [[A0:%.*]] = load double, ptr @src, align 8
; CHECK-NEXT: [[A1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 1), align 8
; CHECK-NEXT: [[A2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 2), align 8
; CHECK-NEXT: [[A3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 3), align 8
; CHECK-NEXT: [[A4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 4), align 8
; CHECK-NEXT: [[A5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 5), align 8
; CHECK-NEXT: [[A6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 6), align 8
; CHECK-NEXT: [[A7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 7), align 8
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> poison, double [[A2]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[A6]], i32 1
; CHECK-NEXT: [[TMP3:%.*]] = call fast <2 x double> @llvm.sin.v2f64(<2 x double> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> poison, double [[A3]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x double> [[TMP4]], double [[A7]], i32 1
; CHECK-NEXT: [[TMP6:%.*]] = call fast <2 x double> @llvm.sin.v2f64(<2 x double> [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> poison, double [[A0]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[A4]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP8]])
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x double> poison, double [[A1]], i32 0
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x double> [[TMP10]], double [[A5]], i32 1
; CHECK-NEXT: [[TMP12:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP11]])
; CHECK-NEXT: [[TMP13:%.*]] = fadd fast <2 x double> [[TMP9]], [[TMP6]]
; CHECK-NEXT: [[TMP14:%.*]] = fadd fast <2 x double> [[TMP3]], [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = fadd fast <2 x double> [[TMP13]], [[TMP14]]
; CHECK-NEXT: store <2 x double> [[TMP15]], ptr @dst, align 8
; CHECK-NEXT: ret void
;
; VECLIB-LABEL: @test(
; VECLIB-NEXT: [[A0:%.*]] = load double, ptr @src, align 8
; VECLIB-NEXT: [[A1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 1), align 8
; VECLIB-NEXT: [[A2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 2), align 8
; VECLIB-NEXT: [[A3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 3), align 8
; VECLIB-NEXT: [[A4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 4), align 8
; VECLIB-NEXT: [[A5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 5), align 8
; VECLIB-NEXT: [[A6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 6), align 8
; VECLIB-NEXT: [[A7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 7), align 8
; VECLIB-NEXT: [[TMP1:%.*]] = insertelement <2 x double> poison, double [[A2]], i32 0
; VECLIB-NEXT: [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[A6]], i32 1
; VECLIB-NEXT: [[TMP3:%.*]] = call fast <2 x double> @__svml_sin2(<2 x double> [[TMP2]])
; VECLIB-NEXT: [[TMP4:%.*]] = insertelement <2 x double> poison, double [[A3]], i32 0
; VECLIB-NEXT: [[TMP5:%.*]] = insertelement <2 x double> [[TMP4]], double [[A7]], i32 1
; VECLIB-NEXT: [[TMP6:%.*]] = call fast <2 x double> @__svml_sin2(<2 x double> [[TMP5]])
; VECLIB-NEXT: [[TMP7:%.*]] = insertelement <2 x double> poison, double [[A0]], i32 0
; VECLIB-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[A4]], i32 1
; VECLIB-NEXT: [[TMP9:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP8]])
; VECLIB-NEXT: [[TMP10:%.*]] = insertelement <2 x double> poison, double [[A1]], i32 0
; VECLIB-NEXT: [[TMP11:%.*]] = insertelement <2 x double> [[TMP10]], double [[A5]], i32 1
; VECLIB-NEXT: [[TMP12:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP11]])
; VECLIB-NEXT: [[TMP13:%.*]] = fadd fast <2 x double> [[TMP9]], [[TMP6]]
; VECLIB-NEXT: [[TMP14:%.*]] = fadd fast <2 x double> [[TMP3]], [[TMP12]]
; VECLIB-NEXT: [[TMP15:%.*]] = fadd fast <2 x double> [[TMP13]], [[TMP14]]
; VECLIB-NEXT: store <2 x double> [[TMP15]], ptr @dst, align 8
; VECLIB-NEXT: ret void
;
  %a0 = load double, ptr @src, align 8
  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 1), align 8
  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 2), align 8
  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 3), align 8
  %a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 4), align 8
  %a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 5), align 8
  %a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 6), align 8
  %a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 7), align 8
  %sin0 = call fast double @llvm.sin.f64(double %a2)
  %sin1 = call fast double @llvm.sin.f64(double %a3)
  %sqrt0 = call fast double @llvm.sqrt.f64(double %a0)
  %sqrt1 = call fast double @llvm.sqrt.f64(double %a1)
  %sin2 = call fast double @llvm.sin.f64(double %a6)
  %sin3 = call fast double @llvm.sin.f64(double %a7)
  %sqrt2 = call fast double @llvm.sqrt.f64(double %a4)
  %sqrt3 = call fast double @llvm.sqrt.f64(double %a5)
  %res1 = fadd fast double %sqrt0, %sin1
  %res2 = fadd fast double %sin0, %sqrt1
  %res00 = fadd fast double %res1, %res2
  %res3 = fadd fast double %sqrt2, %sin3
  %res4 = fadd fast double %sin2, %sqrt3
  %res01 = fadd fast double %res3, %res4
  store double %res00, ptr @dst, align 8
  store double %res01, ptr getelementptr inbounds ([8 x double], ptr @dst, i32 0, i64 1), align 8
  ret void
}