1 ; RUN: opt -loop-vectorize -hexagon-autohvx=1 -force-vector-width=64 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck %s
3 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
4 target triple = "hexagon"
8 ; Check that interleave groups and decisions based on them are correctly
9 ; invalidated with tail-folding on platforms where masked interleaved accesses are not supported.
12 ; Make sure a vector body has been created, 64 element vectors are used and a block predicate has been computed.
13 ; Also make sure the loads are not widened.
17 ; CHECK: icmp ule <64 x i32> %vec.ind
18 ; CHECK-NOT: load <{{.*}} x i32>
21 define void @test1(i32* %arg, i32 %N) #0 {
; NOTE(review): the entry block (which defines %entry and the i8* %tmp stored
; to below) and the %exit block lie outside this chunk and are not shown here.
26 loop: ; preds = %loop, %entry
27 %iv = phi i32 [ %iv.next, %loop], [ 0, %entry ]
; Base index is 7*iv + 1; the seven consecutive i32 loads below form an
; interleave group of factor 7 with no gaps.
28 %idx.mul = mul nuw nsw i32 %iv, 7
29 %idx.start = add nuw nsw i32 %idx.mul, 1
30 %tmp6 = getelementptr inbounds i32, i32* %arg, i32 %idx.start
31 %tmp7 = load i32, i32* %tmp6, align 4
32 %tmp8 = add nuw nsw i32 %idx.start, 1
33 %tmp9 = getelementptr inbounds i32, i32* %arg, i32 %tmp8
34 %tmp10 = load i32, i32* %tmp9, align 4
35 %tmp11 = add nuw nsw i32 %idx.start, 2
36 %tmp12 = getelementptr inbounds i32, i32* %arg, i32 %tmp11
37 %tmp13 = load i32, i32* %tmp12, align 4
38 %tmp14 = add nuw nsw i32 %idx.start, 3
39 %tmp15 = getelementptr inbounds i32, i32* %arg, i32 %tmp14
40 %tmp16 = load i32, i32* %tmp15, align 4
41 %tmp18 = add nuw nsw i32 %idx.start, 4
42 %tmp19 = getelementptr inbounds i32, i32* %arg, i32 %tmp18
43 %tmp20 = load i32, i32* %tmp19, align 4
44 %tmp21 = add nuw nsw i32 %idx.start, 5
45 %tmp22 = getelementptr inbounds i32, i32* %arg, i32 %tmp21
46 %tmp23 = load i32, i32* %tmp22, align 4
47 %tmp25 = add nuw nsw i32 %idx.start, 6
48 %tmp26 = getelementptr inbounds i32, i32* %arg, i32 %tmp25
49 %tmp27 = load i32, i32* %tmp26, align 4
; %tmp is defined in the (out-of-view) entry block.
50 store i8 0, i8* %tmp, align 1
; Trip count is the runtime value %N, so with
; -prefer-predicate-over-epilogue=predicate-dont-vectorize the remainder must
; be handled by tail folding (the `icmp ule <64 x i32>` block mask checked
; above), which invalidates the interleave groups.
51 %iv.next= add nuw nsw i32 %iv, 1
52 %exit.cond = icmp eq i32 %iv.next, %N
53 br i1 %exit.cond, label %exit, label %loop
59 ; The loop below only requires tail folding due to interleave groups with gaps.
60 ; Make sure the loads are not widened.
64 ; CHECK-NOT: load <{{.*}} x i32>
65 define void @test2(i32* %arg) #1 {
; NOTE(review): the entry block (which defines %entry and the i8* %tmp stored
; to below) and the %exit block lie outside this chunk and are not shown here.
70 loop: ; preds = %loop, %entry
71 %iv = phi i32 [ %iv.next, %loop], [ 0, %entry ]
; Base index is 5*iv but only offsets 0..3 are loaded: an interleave group of
; factor 5 with a gap (element 4 is never accessed), which on its own forces
; tail folding — see the comment above this function.
72 %idx.start = mul nuw nsw i32 %iv, 5
73 %tmp6 = getelementptr inbounds i32, i32* %arg, i32 %idx.start
74 %tmp7 = load i32, i32* %tmp6, align 4
75 %tmp8 = add nuw nsw i32 %idx.start, 1
76 %tmp9 = getelementptr inbounds i32, i32* %arg, i32 %tmp8
77 %tmp10 = load i32, i32* %tmp9, align 4
78 %tmp11 = add nuw nsw i32 %idx.start, 2
79 %tmp12 = getelementptr inbounds i32, i32* %arg, i32 %tmp11
80 %tmp13 = load i32, i32* %tmp12, align 4
81 %tmp14 = add nuw nsw i32 %idx.start, 3
82 %tmp15 = getelementptr inbounds i32, i32* %arg, i32 %tmp14
83 %tmp16 = load i32, i32* %tmp15, align 4
; %tmp is defined in the (out-of-view) entry block.
84 store i8 0, i8* %tmp, align 1
; Constant trip count of 128 (a multiple of the forced VF of 64), so no
; remainder iterations exist; only the group gaps require predication.
85 %iv.next= add nuw nsw i32 %iv, 1
86 %exit.cond = icmp eq i32 %iv.next, 128
87 br i1 %exit.cond, label %exit, label %loop
; #0 (@test1): enable HVX with 128-byte vector registers.
94 attributes #0 = { "target-features"="+hvx,+hvx-length128b" }
; #1 (@test2): same HVX configuration plus optsize; presumably optsize rules
; out a scalar epilogue so the gapped interleave group must be tail-folded —
; consistent with the comment above @test2.
95 attributes #1 = { optsize "target-features"="+hvx,+hvx-length128b" }