; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
; double foo(double * restrict b, double * restrict a, int n, int m) {
;   double g = a[0];
;   double r = a[1];
;   double x;
;   for (int i=0; i < 100; i++) {
;     g += 10;
;     r += 10;
;     g *= 4;
;     r *= 4;
;     x = g; <----- external user!
;     g += 4;
;     r += 4;
;   }
;   b[0] = g;
;   b[1] = r;
;   return x; <-- must extract here!
; }
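; The loop body is expected to vectorize into a single <2 x double> chain; the
; externally used value is then recovered with an extractelement feeding the
; return, which is what the autogenerated CHECK lines below verify.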

define double @ext_user(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) {
; CHECK-LABEL: @ext_user(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_020:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = phi <2 x double> [ [[TMP1]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], <double 1.000000e+01, double 1.000000e+01>
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP3]], <double 4.000000e+00, double 4.000000e+00>
; CHECK-NEXT:    [[TMP5]] = fadd <2 x double> [[TMP4]], <double 4.000000e+00, double 4.000000e+00>
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_020]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 100
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
; CHECK-NEXT:    ret double [[TMP7]]
;
entry:
  %arrayidx = getelementptr inbounds double, double* %A, i64 1
  %0 = load double, double* %arrayidx, align 8
  %1 = load double, double* %A, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.020 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %G.019 = phi double [ %1, %entry ], [ %add5, %for.body ]
  %R.018 = phi double [ %0, %entry ], [ %add4, %for.body ]
  %add = fadd double %R.018, 1.000000e+01
  %add2 = fadd double %G.019, 1.000000e+01
  %mul = fmul double %add, 4.000000e+00
  %mul3 = fmul double %add2, 4.000000e+00
  %add4 = fadd double %mul, 4.000000e+00
  %add5 = fadd double %mul3, 4.000000e+00
  %inc = add nsw i32 %i.020, 1
  %exitcond = icmp eq i32 %inc, 100
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  store double %add5, double* %B, align 8
  %arrayidx7 = getelementptr inbounds double, double* %B, i64 1
  store double %add4, double* %arrayidx7, align 8
  ret double %mul3
}

; A need-to-gather entry cannot be an external use of the scalar element.
; Instead the insertelement instructions of the need-to-gather entry are the
; external users.
; This test would assert because we would keep the scalar fpext and fadd alive.
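; The autogenerated CHECK lines below show the function staying fully scalar;
; the point of the test is that it no longer triggers that assertion.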

define i32 @needtogather(double *noalias %a, i32 *noalias %b, float * noalias %c,
; CHECK-LABEL: @needtogather(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[D:%.*]], align 4
; CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C:%.*]]
; CHECK-NEXT:    [[SUB:%.*]] = fsub float 0.000000e+00, [[TMP1]]
; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUB]], 0.000000e+00
; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[CONV]], [[MUL]]
; CHECK-NEXT:    [[CONV1:%.*]] = fpext float [[ADD]] to double
; CHECK-NEXT:    [[SUB3:%.*]] = fsub float 1.000000e+00, [[TMP1]]
; CHECK-NEXT:    [[MUL4:%.*]] = fmul float [[SUB3]], 0.000000e+00
; CHECK-NEXT:    [[ADD5:%.*]] = fadd float [[CONV]], [[MUL4]]
; CHECK-NEXT:    [[CONV6:%.*]] = fpext float [[ADD5]] to double
; CHECK-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[ADD]], 0.000000e+00
; CHECK-NEXT:    br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK:       if.then:
; CHECK-NEXT:    br label [[IF_END]]
; CHECK:       if.end:
; CHECK-NEXT:    [[STOREMERGE:%.*]] = phi double [ [[CONV6]], [[IF_THEN]] ], [ [[CONV1]], [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[E_0:%.*]] = phi double [ [[CONV1]], [[IF_THEN]] ], [ [[CONV6]], [[ENTRY]] ]
; CHECK-NEXT:    store double [[STOREMERGE]], double* [[A:%.*]], align 8
; CHECK-NEXT:    [[CONV7:%.*]] = fptosi double [[E_0]] to i32
; CHECK-NEXT:    store i32 [[CONV7]], i32* [[B:%.*]], align 4
; CHECK-NEXT:    ret i32 undef
;
  i32 * noalias %d) {
entry:
  %0 = load i32, i32* %d, align 4
  %conv = sitofp i32 %0 to float
  %1 = load float, float* %c
  %sub = fsub float 0.000000e+00, %1
  %mul = fmul float %sub, 0.000000e+00
  %add = fadd float %conv, %mul
  %conv1 = fpext float %add to double
  %sub3 = fsub float 1.000000e+00, %1
  %mul4 = fmul float %sub3, 0.000000e+00
  %add5 = fadd float %conv, %mul4
  %conv6 = fpext float %add5 to double
  %tobool = fcmp une float %add, 0.000000e+00
  br i1 %tobool, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  %storemerge = phi double [ %conv6, %if.then ], [ %conv1, %entry ]
  %e.0 = phi double [ %conv1, %if.then ], [ %conv6, %entry ]
  store double %storemerge, double* %a, align 8
  %conv7 = fptosi double %e.0 to i32
  store i32 %conv7, i32* %b, align 4
  ret i32 undef
}