; RUN: opt -basicaa -loop-distribute -enable-loop-distribute -S < %s | \
; RUN:   FileCheck %s --check-prefix=ALL --check-prefix=STRIDE_SPEC

; RUN: opt -basicaa -loop-distribute -enable-loop-distribute -S -enable-mem-access-versioning=0 < %s | \
; RUN:   FileCheck %s --check-prefix=ALL --check-prefix=NO_STRIDE_SPEC
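
; The first RUN line uses the default configuration, where the loop may be
; versioned on the symbolic stride; the second disables that with
; -enable-mem-access-versioning=0, so no speculation happens.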

; If we can't speculate that the stride is 1, we can't distribute the loop
; along the marked line, because there could be a backward dependence:
;
;   for (i = 0; i < n; i++) {
;     A[i + 1] = A[i] * B[i];
;     =======================
;     C[i] = D[i] * A[stride * i];
;   }
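;
; When stride == 1, A[stride * i] reads A[i], which the first statement wrote
; in the previous iteration (as A[(i - 1) + 1]); the source of the dependence
; precedes the sink, so splitting the two statements into consecutive loops
; preserves it. For an arbitrary stride, the read could alias an element
; written by a later iteration, i.e. a backward dependence, and distribution
; would be illegal.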

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"

; ALL-LABEL: @f(
define void @f(i32* noalias %a,
               i32* noalias %b,
               i32* noalias %c,
               i32* noalias %d,
               i64 %stride) {
entry:
  br label %for.body

; STRIDE_SPEC: %ident.check = icmp ne i64 %stride, 1

; STRIDE_SPEC: for.body.ldist1:
; NO_STRIDE_SPEC-NOT: for.body.ldist1:
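;
; for.body.ldist1 is the first of the two loops produced by distribution, and
; %ident.check is the runtime guard added by versioning: when %stride != 1,
; control branches to the original, undistributed loop instead.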

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4
  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
  %loadD = load i32, i32* %arrayidxD, align 4

  %mul = mul i64 %ind, %stride
  %arrayidxStridedA = getelementptr inbounds i32, i32* %a, i64 %mul
  %loadStridedA = load i32, i32* %arrayidxStridedA, align 4

  %mulC = mul i32 %loadD, %loadStridedA

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}