; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 3
; RUN: opt -S -disable-output -passes='print<access-info>' %s 2>&1 | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; A forwarding dependence in the presence of a symbolic stride.
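; The load reads A[iv * %stride] and the store writes A[iv + 1]; under the
; versioning predicate %stride == 1 the two accesses are one i32 apart, so the
; analysis reports a backward loop-carried dependence and rewrites the load's
; pointer SCEV from {%A,+,(4 * %stride)} to {%A,+,4}.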
define void @single_stride(ptr noalias %A, ptr noalias %B, i64 %N, i64 %stride) {
; CHECK-LABEL: 'single_stride'
; CHECK-NEXT: loop:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
; CHECK-NEXT: %load = load i32, ptr %gep.A, align 4 ->
; CHECK-NEXT: store i32 %add, ptr %gep.A.next, align 4
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT: SCEV assumptions:
; CHECK-NEXT: Equal predicate: %stride == 1
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds i32, ptr %A, i64 %mul:
; CHECK-NEXT: {%A,+,(4 * %stride)}<%loop>
; CHECK-NEXT: --> {%A,+,4}<%loop>
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %mul = mul i64 %iv, %stride
  %gep.A = getelementptr inbounds i32, ptr %A, i64 %mul
  %load = load i32, ptr %gep.A, align 4
  %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
  %load_1 = load i32, ptr %gep.B, align 4
  %add = add i32 %load_1, %load
  %iv.next = add nuw nsw i64 %iv, 1
  %gep.A.next = getelementptr inbounds i32, ptr %A, i64 %iv.next
  store i32 %add, ptr %gep.A.next, align 4
  %exitcond = icmp eq i64 %iv.next, %N
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

; Similar to @single_stride, but with struct types.
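; The access pattern is the same as in @single_stride, but the element type is
; the struct { i32, i8 }, whose allocation size under this datalayout is 8
; bytes, so the rewritten pointer SCEV steps by 8 instead of 4:
; {%A,+,(8 * %stride)} --> {%A,+,8}.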
define void @single_stride_struct(ptr noalias %A, ptr noalias %B, i64 %N, i64 %stride) {
; CHECK-LABEL: 'single_stride_struct'
; CHECK-NEXT: loop:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
; CHECK-NEXT: %load = load { i32, i8 }, ptr %gep.A, align 4 ->
; CHECK-NEXT: store { i32, i8 } %ins, ptr %gep.A.next, align 4
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT: SCEV assumptions:
; CHECK-NEXT: Equal predicate: %stride == 1
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds { i32, i8 }, ptr %A, i64 %mul:
; CHECK-NEXT: {%A,+,(8 * %stride)}<%loop>
; CHECK-NEXT: --> {%A,+,8}<%loop>
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %mul = mul i64 %iv, %stride
  %gep.A = getelementptr inbounds { i32, i8 }, ptr %A, i64 %mul
  %load = load { i32, i8 }, ptr %gep.A, align 4
  %gep.B = getelementptr inbounds { i32, i8 }, ptr %B, i64 %iv
  %load_1 = load { i32, i8 }, ptr %gep.B, align 4
  %v1 = extractvalue { i32, i8 } %load, 0
  %v2 = extractvalue { i32, i8 } %load_1, 0
  %add = add i32 %v1, %v2
  %ins = insertvalue { i32, i8 } undef, i32 %add, 0
  %iv.next = add nuw nsw i64 %iv, 1
  %gep.A.next = getelementptr inbounds { i32, i8 }, ptr %A, i64 %iv.next
  store { i32, i8 } %ins, ptr %gep.A.next, align 4
  %exitcond = icmp eq i64 %iv.next, %N
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

; A loop with two symbolic strides.
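; Here both the load address (A[iv * %stride.1]) and the store address
; (A[(iv + 1) * %stride.2]) use a symbolic stride, so the analysis records an
; equality predicate for each stride and rewrites both pointer SCEVs before
; classifying the dependence as backward.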
define void @two_strides(ptr noalias %A, ptr noalias %B, i64 %N, i64 %stride.1, i64 %stride.2) {
; CHECK-LABEL: 'two_strides'
; CHECK-NEXT: loop:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
; CHECK-NEXT: %load = load i32, ptr %gep.A, align 4 ->
; CHECK-NEXT: store i32 %add, ptr %gep.A.next, align 4
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT: SCEV assumptions:
; CHECK-NEXT: Equal predicate: %stride.2 == 1
; CHECK-NEXT: Equal predicate: %stride.1 == 1
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds i32, ptr %A, i64 %mul:
; CHECK-NEXT: {%A,+,(4 * %stride.1)}<%loop>
; CHECK-NEXT: --> {%A,+,4}<%loop>
; CHECK-NEXT: [PSE] %gep.A.next = getelementptr inbounds i32, ptr %A, i64 %mul.2:
; CHECK-NEXT: {((4 * %stride.2) + %A),+,(4 * %stride.2)}<%loop>
; CHECK-NEXT: --> {(4 + %A),+,4}<%loop>
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %mul = mul i64 %iv, %stride.1
  %gep.A = getelementptr inbounds i32, ptr %A, i64 %mul
  %load = load i32, ptr %gep.A, align 4
  %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
  %load_1 = load i32, ptr %gep.B, align 4
  %add = add i32 %load_1, %load
  %iv.next = add nuw nsw i64 %iv, 1
  %mul.2 = mul i64 %iv.next, %stride.2
  %gep.A.next = getelementptr inbounds i32, ptr %A, i64 %mul.2
  store i32 %add, ptr %gep.A.next, align 4
  %exitcond = icmp eq i64 %iv.next, %N
  br i1 %exitcond, label %exit, label %loop