; RUN: opt < %s -loop-reduce -S | FileCheck %s

; CHECK: load double, double addrspace(1)* [[IV:%[^,]+]]
; CHECK: store double {{.*}}, double addrspace(1)* [[IV]]

; Make sure the GEP has the right index type
; CHECK: getelementptr double, double addrspace(1)* [[IV]], i16 1
; CHECK: br {{.*}} label %bb1

; Make sure the GEP has the right index type
; CHECK: getelementptr double, double addrspace(1)* {{.*}}, i16
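; (Pointers in address space 1 are 16 bits wide per the datalayout below,
; so the expanded GEP should use an i16 index rather than i64.)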

; This test checks several things. The load and store should use the
; same address instead of having it computed twice, and SCEVExpander should
; be able to reconstruct the full getelementptr, despite it having a few
; obstacles set in its way.
; We only check that the inner loop (bb1-bb2) is "reduced", because LSR
; currently only operates on inner loops.
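;
; Roughly, the reduced inner loop is expected to drive both memory accesses
; off a single pointer induction variable, something like:
;   %iv   = phi double addrspace(1)* [ ... ]
;   %v    = load double, double addrspace(1)* %iv
;   store double ..., double addrspace(1)* %iv
;   %next = getelementptr double, double addrspace(1)* %iv, i16 1
; (a sketch based on the CHECK lines above; exact value names will differ).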

target datalayout = "e-p:64:64:64-p1:16:16:16-n16:32:64"

define void @foo(i64 %n, i64 %m, i64 %o, i64 %q, double addrspace(1)* nocapture %p) nounwind {
entry:
  %tmp = icmp sgt i64 %n, 0    ; <i1> [#uses=1]
  br i1 %tmp, label %bb.nph3, label %return

bb.nph:    ; preds = %bb2.preheader
  %tmp1 = mul i64 %tmp16, %i.02    ; <i64> [#uses=1]
  %tmp2 = mul i64 %tmp19, %i.02    ; <i64> [#uses=1]
  br label %bb1

bb1:    ; preds = %bb2, %bb.nph
  %j.01 = phi i64 [ %tmp9, %bb2 ], [ 0, %bb.nph ]    ; <i64> [#uses=3]
  %tmp3 = add i64 %j.01, %tmp1    ; <i64> [#uses=1]
  %tmp4 = add i64 %j.01, %tmp2    ; <i64> [#uses=1]
  %z0 = add i64 %tmp3, 5203
  %tmp5 = getelementptr double, double addrspace(1)* %p, i64 %z0    ; <double addrspace(1)*> [#uses=1]
  %tmp6 = load double, double addrspace(1)* %tmp5, align 8    ; <double> [#uses=1]
  %tmp7 = fdiv double %tmp6, 2.100000e+00    ; <double> [#uses=1]
  %z1 = add i64 %tmp4, 5203
  %tmp8 = getelementptr double, double addrspace(1)* %p, i64 %z1    ; <double addrspace(1)*> [#uses=1]
  store double %tmp7, double addrspace(1)* %tmp8, align 8
  %tmp9 = add i64 %j.01, 1    ; <i64> [#uses=2]
  br label %bb2

bb2:    ; preds = %bb1
  %tmp10 = icmp slt i64 %tmp9, %m    ; <i1> [#uses=1]
  br i1 %tmp10, label %bb1, label %bb2.bb3_crit_edge

bb2.bb3_crit_edge:    ; preds = %bb2
  br label %bb3

bb3:    ; preds = %bb2.preheader, %bb2.bb3_crit_edge
  %tmp11 = add i64 %i.02, 1    ; <i64> [#uses=2]
  br label %bb4

bb4:    ; preds = %bb3
  %tmp12 = icmp slt i64 %tmp11, %n    ; <i1> [#uses=1]
  br i1 %tmp12, label %bb2.preheader, label %bb4.return_crit_edge

bb4.return_crit_edge:    ; preds = %bb4
  br label %bb4.return_crit_edge.split

bb4.return_crit_edge.split:    ; preds = %bb.nph3, %bb4.return_crit_edge
  br label %return
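
; bb.nph3 computes the same loop-invariant stride (n * 37 * o * q) twice, as
; %tmp16 and %tmp19; both feed the address computations in bb1. This
; redundancy is presumably among the "obstacles" mentioned at the top of the
; test.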
bb.nph3:    ; preds = %entry
  %tmp13 = icmp sgt i64 %m, 0    ; <i1> [#uses=1]
  %tmp14 = mul i64 %n, 37    ; <i64> [#uses=1]
  %tmp15 = mul i64 %tmp14, %o    ; <i64> [#uses=1]
  %tmp16 = mul i64 %tmp15, %q    ; <i64> [#uses=1]
  %tmp17 = mul i64 %n, 37    ; <i64> [#uses=1]
  %tmp18 = mul i64 %tmp17, %o    ; <i64> [#uses=1]
  %tmp19 = mul i64 %tmp18, %q    ; <i64> [#uses=1]
  br i1 %tmp13, label %bb.nph3.split, label %bb4.return_crit_edge.split

bb.nph3.split:    ; preds = %bb.nph3
  br label %bb2.preheader
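
; Outer loop header. The constant-true branch below always enters the inner
; loop preheader (bb.nph), so the edge to bb3 is never taken at run time.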
bb2.preheader:    ; preds = %bb.nph3.split, %bb4
  %i.02 = phi i64 [ %tmp11, %bb4 ], [ 0, %bb.nph3.split ]    ; <i64> [#uses=3]
  br i1 true, label %bb.nph, label %bb3

return:    ; preds = %bb4.return_crit_edge.split, %entry
  ret void
}