; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:1"
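
; The loop below advances a 64-bit IV (starting at 2) through a 32-bit mask and
; stores its truncated value to the uniform address %ptr.2 on every iteration,
; while the full 64-bit value is stored to %ptr[%can.iv]. With VF 4 forced, the
; expected output includes runtime alias checks between %ptr and %ptr.2.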
define void @foo(ptr %ptr, ptr %ptr.2) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, ptr [[PTR_2:%.*]], i64 4
; CHECK-NEXT:    [[UGLYGEP1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 640
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[PTR_2]], [[UGLYGEP1]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[PTR]], [[UGLYGEP]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 2, i64 3, i64 4, i64 5>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 2, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[OFFSET_IDX]] to i32
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[TMP0]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[TMP0]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[TMP0]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[TMP0]], 3
; CHECK-NEXT:    store i32 [[TMP4]], ptr [[PTR_2]], align 4, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0
; CHECK-NEXT:    store <4 x i64> [[VEC_IND]], ptr [[TMP7]], align 8, !alias.scope [[META3]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 80
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 80, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL2:%.*]] = phi i64 [ 82, [[MIDDLE_BLOCK]] ], [ 2, [[ENTRY]] ], [ 2, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    unreachable
; CHECK:       loop:
; CHECK-NEXT:    [[CAN_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[CAN_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[TMP9:%.*]] = phi i64 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[TMP12:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], 4294967295
; CHECK-NEXT:    [[TMP11:%.*]] = trunc i64 [[TMP9]] to i32
; CHECK-NEXT:    store i32 [[TMP11]], ptr [[PTR_2]], align 4
; CHECK-NEXT:    [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[CAN_IV]]
; CHECK-NEXT:    store i64 [[TMP9]], ptr [[GEP_PTR]], align 8
; CHECK-NEXT:    [[TMP12]] = add nuw nsw i64 [[TMP10]], 1
; CHECK-NEXT:    [[TMP13:%.*]] = icmp sgt i32 [[TMP11]], 80
; CHECK-NEXT:    [[CAN_IV_NEXT]] = add nuw nsw i64 [[CAN_IV]], 1
; CHECK-NEXT:    br i1 [[TMP13]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %can.iv = phi i64 [ 0, %entry ], [ %can.iv.next, %loop ]
  %0 = phi i64 [ 2, %entry ], [ %3, %loop ]
  %1 = and i64 %0, 4294967295
  %2 = trunc i64 %0 to i32
  store i32 %2, ptr %ptr.2
  %gep.ptr = getelementptr inbounds i64, ptr %ptr, i64 %can.iv
  store i64 %0, ptr %gep.ptr
  %3 = add nuw nsw i64 %1, 1
  %4 = icmp sgt i32 %2, 80
  %can.iv.next = add nuw nsw i64 %can.iv, 1
  br i1 %4, label %exit, label %loop
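
exit:
  ret void
}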