; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -tail-predication=enabled -mattr=+mve,+lob %s -S -o - | FileCheck %s
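
; Editorial note: the loops below zero-extend ("expand") their loaded elements
; to a wider type before storing them, so the tail-predication pass sees loops
; whose load and store element widths differ. In this first loop both
; <8 x i16> loads are widened to <8 x i32> and the store reuses the same
; 8-lane mask as the loads.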
; CHECK-LABEL: expand_v8i16_v8i32
; CHECK-NOT: call i32 @llvm.arm.mve.vctp
define void @expand_v8i16_v8i32(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
entry:
  %cmp8 = icmp eq i32 %N, 0
  %tmp8 = add i32 %N, 7
  %tmp9 = lshr i32 %tmp8, 3
  %tmp10 = shl nuw i32 %tmp9, 3
  %tmp11 = add i32 %tmp10, -8
  %tmp12 = lshr i32 %tmp11, 3
  %tmp13 = add nuw nsw i32 %tmp12, 1
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp13)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
  %tmp = getelementptr inbounds i16, ptr %a, i32 %index
  %tmp1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
  %tmp3 = getelementptr inbounds i16, ptr %b, i32 %index
  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp3, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
  %expand.1 = zext <8 x i16> %wide.masked.load to <8 x i32>
  %expand.2 = zext <8 x i16> %wide.masked.load2 to <8 x i32>
  %mul = mul nsw <8 x i32> %expand.2, %expand.1
  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
  tail call void @llvm.masked.store.v8i32.p0(<8 x i32> %mul, ptr %tmp6, i32 4, <8 x i1> %tmp1)
  %index.next = add i32 %index, 8
  %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
  %tmp16 = icmp ne i32 %tmp15, 0
  br i1 %tmp16, label %vector.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}
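
; Editorial note: in this loop the <8 x i16> loads are split into low and high
; <4 x i16> halves, widened to <4 x i32>, and written back through two 4-lane
; masked stores. The stores use their own predicate (%store.pred), computed
; from %store.idx against the trip count, rather than the 8-lane load mask.
; The CHECK lines below expect the load mask to become a vctp16 while both
; stores keep the original icmp-based %store.pred.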
; CHECK-LABEL: expand_v8i16_v4i32
; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[ELEMS_REM:%[^ ]+]], %vector.body ]
; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[ELEMS]])
; CHECK: [[ELEMS_REM]] = sub i32 [[ELEMS]], 8
; CHECK: tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
; CHECK: %store.pred = icmp ule <4 x i32> %induction.store
; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> %store.pred)
; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> %store.pred)
define void @expand_v8i16_v4i32(ptr readonly %a, ptr readonly %b, ptr %c, ptr %d, i32 %N) {
entry:
  %cmp8 = icmp eq i32 %N, 0
  %tmp8 = add i32 %N, 7
  %tmp9 = lshr i32 %tmp8, 3
  %tmp10 = shl nuw i32 %tmp9, 3
  %tmp11 = add i32 %tmp10, -8
  %tmp12 = lshr i32 %tmp11, 3
  %tmp13 = add nuw nsw i32 %tmp12, 1
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %trip.count.minus.1 = add i32 %N, -1
  %broadcast.splatinsert10.store = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
  %broadcast.splat11.store = shufflevector <4 x i32> %broadcast.splatinsert10.store, <4 x i32> undef, <4 x i32> zeroinitializer
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp13)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %store.idx = phi i32 [ 0, %vector.ph ], [ %store.idx.next, %vector.body ]
  %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
  %tmp = getelementptr inbounds i16, ptr %a, i32 %index
  %tmp1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
  %tmp3 = getelementptr inbounds i16, ptr %b, i32 %index
  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp3, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
  %extract.2.low = shufflevector <8 x i16> %wide.masked.load2, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %extract.2.high = shufflevector <8 x i16> %wide.masked.load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %expand.1 = zext <4 x i16> %extract.2.low to <4 x i32>
  %expand.2 = zext <4 x i16> %extract.2.high to <4 x i32>
  %mul = mul nsw <4 x i32> %expand.2, %expand.1
  %sub = mul nsw <4 x i32> %expand.1, %expand.2
  %broadcast.splatinsert.store = insertelement <4 x i32> undef, i32 %store.idx, i32 0
  %broadcast.splat.store = shufflevector <4 x i32> %broadcast.splatinsert.store, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction.store = add <4 x i32> %broadcast.splat.store, <i32 0, i32 1, i32 2, i32 3>
  %store.pred = icmp ule <4 x i32> %induction.store, %broadcast.splat11.store
  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %store.idx
  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %mul, ptr %tmp6, i32 4, <4 x i1> %store.pred)
  %gep = getelementptr inbounds i32, ptr %d, i32 %store.idx
  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %sub, ptr %gep, i32 4, <4 x i1> %store.pred)
  %store.idx.next = add i32 %store.idx, 4
  %index.next = add i32 %index, 8
  %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
  %tmp16 = icmp ne i32 %tmp15, 0
  br i1 %tmp16, label %vector.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}
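
; Editorial note: in this loop the <4 x i32> loads are widened to <4 x i64>
; and stored with the same <4 x i1> mask that predicates the loads.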
; CHECK-LABEL: expand_v4i32_v4i64
; CHECK-NOT: call i32 @llvm.arm.mve.vctp
define void @expand_v4i32_v4i64(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
entry:
  %cmp8 = icmp eq i32 %N, 0
  %tmp8 = add i32 %N, 3
  %tmp9 = lshr i32 %tmp8, 2
  %tmp10 = shl nuw i32 %tmp9, 2
  %tmp11 = add i32 %tmp10, -4
  %tmp12 = lshr i32 %tmp11, 2
  %tmp13 = add nuw nsw i32 %tmp12, 1
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp13)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
  %tmp1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
  %expand.1 = zext <4 x i32> %wide.masked.load to <4 x i64>
  %expand.2 = zext <4 x i32> %wide.masked.load2 to <4 x i64>
  %mul = mul nsw <4 x i64> %expand.2, %expand.1
  %tmp6 = getelementptr inbounds i64, ptr %c, i32 %index
  tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %mul, ptr %tmp6, i32 4, <4 x i1> %tmp1)
  %index.next = add i32 %index, 4
  %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
  %tmp16 = icmp ne i32 %tmp15, 0
  br i1 %tmp16, label %vector.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}

declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
declare void @llvm.masked.store.v8i32.p0(<8 x i32>, ptr, i32 immarg, <8 x i1>)
declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
declare void @llvm.masked.store.v4i64.p0(<4 x i64>, ptr, i32 immarg, <4 x i1>)
declare i32 @llvm.start.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)