; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-distribute -enable-loop-distribute -S < %s | \
; RUN:   FileCheck %s --check-prefix=DEFAULT
; RUN: opt -passes=loop-distribute -enable-loop-distribute -S -enable-mem-access-versioning=0 < %s | \
; RUN:   FileCheck %s --check-prefix=NO-VERSION
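
; DEFAULT runs with symbolic-stride speculation enabled and expects the loop
; to be versioned and distributed; NO-VERSION disables memory-access
; versioning and expects the loop to be left undistributed.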

; If we cannot speculate that the symbolic stride is 1, we cannot distribute
; across the marked line because there could be a backward dependence:
;
;   for (i = 0; i < n; i++) {
;     A[i + 1] = A[i] * B[i];
;     =======================
;     C[i] = D[i] * A[stride * i];
;   }
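;
; As a sketch (an editor's illustration, not part of the generated checks),
; the versioned and distributed form that the DEFAULT run expects looks
; roughly like:
;
;   if (stride != 1) {                // for.body.lver.check
;     /* for.body.lver.orig: the original, undistributed loop */
;   } else {
;     for (i = 0; i < n; i++)         // for.body.ldist1
;       A[i + 1] = A[i] * B[i];
;     for (i = 0; i < n; i++)         // for.body: safe, since stride == 1 here
;       C[i] = D[i] * A[stride * i];
;   }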

define void @f(ptr noalias %a, ptr noalias %b, ptr noalias %c, ptr noalias %d, i64 %stride) {
; DEFAULT-LABEL: @f(
; DEFAULT-NEXT: entry:
; DEFAULT-NEXT: br label [[FOR_BODY_LVER_CHECK:%.*]]
; DEFAULT: for.body.lver.check:
; DEFAULT-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; DEFAULT-NEXT: br i1 [[IDENT_CHECK]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
; DEFAULT: for.body.ph.lver.orig:
; DEFAULT-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; DEFAULT: for.body.lver.orig:
; DEFAULT-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; DEFAULT-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IND_LVER_ORIG]]
; DEFAULT-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXA_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IND_LVER_ORIG]]
; DEFAULT-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXB_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
; DEFAULT-NEXT: [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LVER_ORIG]]
; DEFAULT-NEXT: store i32 [[MULA_LVER_ORIG]], ptr [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[IND_LVER_ORIG]]
; DEFAULT-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXD_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[IND_LVER_ORIG]], [[STRIDE]]
; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_LVER_ORIG]]
; DEFAULT-NEXT: [[LOADSTRIDEDA_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXSTRIDEDA_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADSTRIDEDA_LVER_ORIG]]
; DEFAULT-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IND_LVER_ORIG]]
; DEFAULT-NEXT: store i32 [[MULC_LVER_ORIG]], ptr [[ARRAYIDXC_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], 20
; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; DEFAULT: for.body.ph.ldist1:
; DEFAULT-NEXT: br label [[FOR_BODY_LDIST1:%.*]]
; DEFAULT: for.body.ldist1:
; DEFAULT-NEXT: [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; DEFAULT-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IND_LDIST1]]
; DEFAULT-NEXT: [[LOADA_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXA_LDIST1]], align 4
; DEFAULT-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IND_LDIST1]]
; DEFAULT-NEXT: [[LOADB_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXB_LDIST1]], align 4
; DEFAULT-NEXT: [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; DEFAULT-NEXT: [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LDIST1]]
; DEFAULT-NEXT: store i32 [[MULA_LDIST1]], ptr [[ARRAYIDXA_PLUS_4_LDIST1]], align 4
; DEFAULT-NEXT: [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], 20
; DEFAULT-NEXT: br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
; DEFAULT: for.body.ph:
; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
; DEFAULT: for.body:
; DEFAULT-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; DEFAULT-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
; DEFAULT-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[IND]]
; DEFAULT-NEXT: [[LOADD:%.*]] = load i32, ptr [[ARRAYIDXD]], align 4
; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[IND]], [[STRIDE]]
; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL]]
; DEFAULT-NEXT: [[LOADSTRIDEDA:%.*]] = load i32, ptr [[ARRAYIDXSTRIDEDA]], align 4
; DEFAULT-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADSTRIDEDA]]
; DEFAULT-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IND]]
; DEFAULT-NEXT: store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4
; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], 20
; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
; DEFAULT: for.end.loopexit:
; DEFAULT-NEXT: br label [[FOR_END:%.*]]
; DEFAULT: for.end.loopexit1:
; DEFAULT-NEXT: br label [[FOR_END]]
; DEFAULT: for.end:
; DEFAULT-NEXT: ret void
;
; NO-VERSION-LABEL: @f(
; NO-VERSION-NEXT: entry:
; NO-VERSION-NEXT: br label [[FOR_BODY:%.*]]
; NO-VERSION: for.body:
; NO-VERSION-NEXT: [[IND:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; NO-VERSION-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IND]]
; NO-VERSION-NEXT: [[LOADA:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4
; NO-VERSION-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IND]]
; NO-VERSION-NEXT: [[LOADB:%.*]] = load i32, ptr [[ARRAYIDXB]], align 4
; NO-VERSION-NEXT: [[MULA:%.*]] = mul i32 [[LOADB]], [[LOADA]]
; NO-VERSION-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
; NO-VERSION-NEXT: [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD]]
; NO-VERSION-NEXT: store i32 [[MULA]], ptr [[ARRAYIDXA_PLUS_4]], align 4
; NO-VERSION-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[IND]]
; NO-VERSION-NEXT: [[LOADD:%.*]] = load i32, ptr [[ARRAYIDXD]], align 4
; NO-VERSION-NEXT: [[MUL:%.*]] = mul i64 [[IND]], [[STRIDE:%.*]]
; NO-VERSION-NEXT: [[ARRAYIDXSTRIDEDA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL]]
; NO-VERSION-NEXT: [[LOADSTRIDEDA:%.*]] = load i32, ptr [[ARRAYIDXSTRIDEDA]], align 4
; NO-VERSION-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADSTRIDEDA]]
; NO-VERSION-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IND]]
; NO-VERSION-NEXT: store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4
; NO-VERSION-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], 20
; NO-VERSION-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; NO-VERSION: for.end:
; NO-VERSION-NEXT: ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]

  %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
  %loadA = load i32, ptr %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
  %loadB = load i32, ptr %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
  store i32 %mulA, ptr %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
  %loadD = load i32, ptr %arrayidxD, align 4

  %mul = mul i64 %ind, %stride
  %arrayidxStridedA = getelementptr inbounds i32, ptr %a, i64 %mul
  %loadStridedA = load i32, ptr %arrayidxStridedA, align 4

  %mulC = mul i32 %loadD, %loadStridedA

  %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
  store i32 %mulC, ptr %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}