; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -mcpu=cascadelake < %s | FileCheck %s
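; Check that the scalar float chain spanning bb1, bb2 and bb3 (loads, fsub/fadd
; and the @llvm.fmuladd.f32 calls joined by phis) is vectorized into <2 x float>
; operations, including a single @llvm.fmuladd.v2f32 call and a vector store.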
; CHECK-NEXT:    [[ARRAYIDX10_I_I86:%.*]] = getelementptr inbounds float, ptr undef, i64 2
; CHECK-NEXT:    [[ARRAYIDX21_I:%.*]] = getelementptr inbounds [4 x float], ptr undef, i64 2
; CHECK-NEXT:    br label [[BB1:%.*]]
; CHECK:       bb1:
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr undef, align 4
; CHECK-NEXT:    [[TMP1:%.*]] = fsub <2 x float> zeroinitializer, [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[ARRAYIDX10_I_I86]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr undef, align 4
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x float> <float 0.000000e+00, float poison>, float [[TMP2]], i32 1
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x float> [[TMP0]], float [[TMP3]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> <float poison, float 0.000000e+00>, <2 x i32> <i32 1, i32 3>
; CHECK-NEXT:    [[TMP7:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP4]], <2 x float> [[TMP5]], <2 x float> [[TMP6]])
; CHECK-NEXT:    br i1 false, label [[BB2:%.*]], label [[BB3:%.*]]
; CHECK:       bb2:
; CHECK-NEXT:    [[TMP8:%.*]] = fmul <2 x float> [[TMP7]], zeroinitializer
; CHECK-NEXT:    br label [[BB3]]
; CHECK:       bb3:
; CHECK-NEXT:    [[TMP9:%.*]] = phi <2 x float> [ [[TMP8]], [[BB2]] ], [ zeroinitializer, [[BB1]] ]
; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT:    [[TMP11:%.*]] = fadd <2 x float> [[TMP1]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = fadd <2 x float> [[TMP11]], zeroinitializer
; CHECK-NEXT:    [[TMP13:%.*]] = fsub <2 x float> [[TMP12]], zeroinitializer
; CHECK-NEXT:    [[TMP14:%.*]] = fsub <2 x float> [[TMP13]], zeroinitializer
; CHECK-NEXT:    store <2 x float> [[TMP14]], ptr [[ARRAYIDX21_I]], align 16
; CHECK-NEXT:    ret void
  %arrayidx10.i.i86 = getelementptr inbounds float, ptr undef, i64 2
  %arrayidx6.i66.i = getelementptr inbounds float, ptr undef, i64 1
  %arrayidx21.i = getelementptr inbounds [4 x float], ptr undef, i64 2
  %arrayidx6.i109.i = getelementptr inbounds [4 x float], ptr undef, i64 2, i64 1
  br label %bb1

bb1:
  %0 = load float, ptr undef, align 4
  %sub.i71.i = fsub float 0.000000e+00, %0
  %1 = load float, ptr %arrayidx6.i66.i, align 4
  %sub5.i74.i = fsub float 0.000000e+00, %1
  %2 = load float, ptr %arrayidx10.i.i86, align 4
  %3 = call float @llvm.fmuladd.f32(float %1, float %2, float 0.000000e+00)
  %4 = load float, ptr undef, align 4
  %5 = call float @llvm.fmuladd.f32(float 0.000000e+00, float %4, float %2)
  br i1 false, label %bb2, label %bb3

bb2:
  %mul.i95 = fmul float %3, 0.000000e+00
  %mul3.i96 = fmul float %5, 0.000000e+00
  br label %bb3

bb3:
  %vddir.sroa.8.0.i = phi float [ %mul3.i96, %bb2 ], [ 0.000000e+00, %bb1 ]
  %vddir.sroa.0.0.i = phi float [ %mul.i95, %bb2 ], [ 0.000000e+00, %bb1 ]
  %add.i.i = fadd float %sub.i71.i, %vddir.sroa.0.0.i
  %add5.i.i = fadd float %sub5.i74.i, %vddir.sroa.8.0.i
  %add.i105.i = fadd float %add.i.i, 0.000000e+00
  %add5.i108.i = fadd float %add5.i.i, 0.000000e+00
  %sub.i114.i = fsub float %add.i105.i, 0.000000e+00
  %sub4.i.i = fsub float %add5.i108.i, 0.000000e+00
  %sub.i118.i = fsub float %sub.i114.i, 0.000000e+00
  store float %sub.i118.i, ptr %arrayidx21.i, align 16
  %sub4.i121.i = fsub float %sub4.i.i, 0.000000e+00
  store float %sub4.i121.i, ptr %arrayidx6.i109.i, align 4
  ret void
}

declare float @llvm.fmuladd.f32(float, float, float)