; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=powerpc64le-unknown-linux-gnu -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -hoist-common-insts=true -enable-unsafe-fp-math -S | \
; RUN:   FileCheck %s

; This case is copied from test/Transforms/SimplifyCFG/AArch64/
; Function Attrs: nounwind
define double @_Z3fooRdS_S_S_(ptr dereferenceable(8) %x, ptr dereferenceable(8) %y, ptr dereferenceable(8) %a) {
; CHECK-LABEL: @_Z3fooRdS_S_S_(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[Y:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = fcmp oeq double [[TMP0]], 0.000000e+00
; CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[X:%.*]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load double, ptr [[A:%.*]], align 8
; CHECK-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CHECK:       if.then:
; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[MUL:%.*]] = fadd fast double 1.000000e+00, [[TMP3]]
; CHECK-NEXT:    store double [[MUL]], ptr [[Y]], align 8
; CHECK-NEXT:    br label [[IF_END:%.*]]
; CHECK:       if.else:
; CHECK-NEXT:    [[MUL1:%.*]] = fmul fast double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[SUB1:%.*]] = fsub fast double [[MUL1]], [[TMP0]]
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr double, ptr [[Y]], i32 1
; CHECK-NEXT:    store double [[SUB1]], ptr [[GEP1]], align 8
; CHECK-NEXT:    br label [[IF_END]]
; CHECK:       if.end:
; CHECK-NEXT:    [[TMP4:%.*]] = load double, ptr [[Y]], align 8
; CHECK-NEXT:    [[CMP2:%.*]] = fcmp oeq double [[TMP4]], 2.000000e+00
; CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[X]], align 8
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF_THEN2:%.*]], label [[IF_ELSE2:%.*]]
; CHECK:       if.then2:
; CHECK-NEXT:    [[TMP6:%.*]] = load double, ptr [[A]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = fmul fast double [[TMP5]], 3.000000e+00
; CHECK-NEXT:    [[MUL2:%.*]] = fsub fast double [[TMP6]], [[TMP7]]
; CHECK-NEXT:    store double [[MUL2]], ptr [[Y]], align 8
; CHECK-NEXT:    br label [[IF_END2:%.*]]
; CHECK:       if.else2:
; CHECK-NEXT:    [[MUL3:%.*]] = fmul fast double [[TMP5]], 3.000000e+00
; CHECK-NEXT:    [[NEG:%.*]] = fsub fast double 0.000000e+00, [[MUL3]]
; CHECK-NEXT:    [[SUB2:%.*]] = fsub fast double [[NEG]], 3.000000e+00
; CHECK-NEXT:    store double [[SUB2]], ptr [[Y]], align 8
; CHECK-NEXT:    br label [[IF_END2]]
; CHECK:       if.end2:
; CHECK-NEXT:    [[TMP8:%.*]] = load double, ptr [[X]], align 8
; CHECK-NEXT:    [[TMP9:%.*]] = load double, ptr [[Y]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = fadd fast double [[TMP8]], [[TMP9]]
; CHECK-NEXT:    [[TMP10:%.*]] = load double, ptr [[A]], align 8
; CHECK-NEXT:    [[ADD2:%.*]] = fadd fast double [[ADD]], [[TMP10]]
; CHECK-NEXT:    ret double [[ADD2]]
;
entry:
  %0 = load double, ptr %y, align 8
  %cmp = fcmp oeq double %0, 0.000000e+00
  %1 = load double, ptr %x, align 8
  br i1 %cmp, label %if.then, label %if.else

; fadd (const, (fmul x, y))
if.then: ; preds = %entry
  %2 = load double, ptr %a, align 8
  %3 = fmul fast double %1, %2
  %mul = fadd fast double 1.000000e+00, %3
  store double %mul, ptr %y, align 8
  br label %if.end

; fsub ((fmul x, y), z)
if.else: ; preds = %entry
  %4 = load double, ptr %a, align 8
  %mul1 = fmul fast double %1, %4
  %sub1 = fsub fast double %mul1, %0
  %gep1 = getelementptr double, ptr %y, i32 1
  store double %sub1, ptr %gep1, align 8
  br label %if.end

if.end: ; preds = %if.else, %if.then
  %5 = load double, ptr %y, align 8
  %cmp2 = fcmp oeq double %5, 2.000000e+00
  %6 = load double, ptr %x, align 8
  br i1 %cmp2, label %if.then2, label %if.else2

; fsub (x, (fmul y, z))
if.then2: ; preds = %if.end
  %7 = load double, ptr %a, align 8
  %8 = fmul fast double %6, 3.000000e+00
  %mul2 = fsub fast double %7, %8
  store double %mul2, ptr %y, align 8
  br label %if.end2

; fsub (fneg((fmul x, y)), const)
if.else2: ; preds = %if.end
  %mul3 = fmul fast double %6, 3.000000e+00
  %neg = fsub fast double 0.000000e+00, %mul3
  %sub2 = fsub fast double %neg, 3.000000e+00
  store double %sub2, ptr %y, align 8
  br label %if.end2

if.end2: ; preds = %if.else2, %if.then2
  %9 = load double, ptr %x, align 8
  %10 = load double, ptr %y, align 8
  %add = fadd fast double %9, %10
  %11 = load double, ptr %a, align 8
  %add2 = fadd fast double %add, %11
  ret double %add2
}