; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
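
; These tests cover the runtime memory checks emitted by the loop vectorizer.
; When source and sink stride at the same rate and access the same number of
; bytes, the CHECK lines below expect a single pointer-difference ("diff")
; check; otherwise they expect the generic overlap checks built from
; SCEV-expanded bounds.

; Both accesses use a 4-byte step and a 4-byte access size, so the memcheck
; block below is expected to reduce to one comparison of (%b - %a) against
; VF * size = 16 bytes.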
define void @same_step_and_size(i32* %a, i32* %b, i64 %n) {
; CHECK-LABEL: @same_step_and_size(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint i32* [[A:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = ptrtoint i32* [[B:%.*]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP0:%.*]] = sub i64 [[B1]], [[A2]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds i32, i32* %a, i64 %iv
  %l = load i32, i32* %gep.a
  %mul = mul nsw i32 %l, 3
  %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
  store i32 %mul, i32* %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
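
; The two stores sit in mutually exclusive branches, so neither access
; dominates the other; the memcheck block below is still expected to use a
; single diff check because both stores share the same 4-byte step and size.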
define void @same_step_and_size_no_dominance_between_accesses(i32* %a, i32* %b, i64 %n, i64 %x) {
; CHECK-LABEL: @same_step_and_size_no_dominance_between_accesses(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B2:%.*]] = ptrtoint i32* [[B:%.*]] to i64
; CHECK-NEXT:    [[A1:%.*]] = ptrtoint i32* [[A:%.*]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP0:%.*]] = sub i64 [[A1]], [[B2]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %cmp = icmp ne i64 %iv, %x
  br i1 %cmp, label %then, label %else

then:
  %gep.a = getelementptr inbounds i32, i32* %a, i64 %iv
  store i32 0, i32* %gep.a
  br label %loop.latch

else:
  %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
  store i32 10, i32* %gep.b
  br label %loop.latch

loop.latch:
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
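
; The load steps by 2 bytes (i16) while the store steps by 4 bytes (i32), so a
; single diff check does not apply; the memcheck block below is expected to
; carry the generic overlap checks built from SCEV-expanded end pointers.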
define void @different_steps_and_different_access_sizes(i16* %a, i32* %b, i64 %n) {
; CHECK-LABEL: @different_steps_and_different_access_sizes(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B1:%.*]] = bitcast i32* [[B:%.*]] to i8*
; CHECK-NEXT:    [[A3:%.*]] = bitcast i16* [[A:%.*]] to i8*
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B]], i64 [[N]]
; CHECK-NEXT:    [[SCEVGEP2:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
; CHECK-NEXT:    [[SCEVGEP4:%.*]] = getelementptr i16, i16* [[A]], i64 [[N]]
; CHECK-NEXT:    [[SCEVGEP45:%.*]] = bitcast i16* [[SCEVGEP4]] to i8*
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[B1]], [[SCEVGEP45]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[A3]], [[SCEVGEP2]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds i16, i16* %a, i64 %iv
  %l = load i16, i16* %gep.a
  %l.ext = sext i16 %l to i32
  %mul = mul nsw i32 %l.ext, 3
  %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
  store i32 %mul, i32* %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}
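
; Both pointers advance by 4 bytes per iteration ([2 x i16] vs. i32), but the
; load only reads the second i16 of each [2 x i16] element. The memcheck block
; below is expected to still use a diff check, folding that 2-byte offset into
; the subtraction while keeping the 16-byte bound.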
define void @steps_match_but_different_access_sizes_1([2 x i16]* %a, i32* %b, i64 %n) {
; CHECK-LABEL: @steps_match_but_different_access_sizes_1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint [2 x i16]* [[A:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = ptrtoint i32* [[B:%.*]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw i64 [[A2]], 2
; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[B1]], [[TMP0]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 %iv, i64 1
  %l = load i16, i16* %gep.a
  %l.ext = sext i16 %l to i32
  %mul = mul nsw i32 %l.ext, 3
  %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
  store i32 %mul, i32* %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

; Same as @steps_match_but_different_access_sizes_1, but with source and sink
; accesses flipped.
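; With the direction reversed, the expected diff check subtracts the other way
; ((%a + 2) - %b) and still compares against 16 bytes.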
define void @steps_match_but_different_access_sizes_2([2 x i16]* %a, i32* %b, i64 %n) {
; CHECK-LABEL: @steps_match_but_different_access_sizes_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B2:%.*]] = ptrtoint i32* [[B:%.*]] to i64
; CHECK-NEXT:    [[A1:%.*]] = ptrtoint [2 x i16]* [[A:%.*]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw i64 [[A1]], 2
; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[TMP0]], [[B2]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
  %l = load i32, i32* %gep.b
  %mul = mul nsw i32 %l, 3
  %gep.a = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 %iv, i64 1
  %trunc = trunc i32 %mul to i16
  store i16 %trunc, i16* %gep.a
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}