; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -tail-predication=enabled -mattr=+mve,+lob %s -S -o - | FileCheck %s

; CHECK-LABEL: expand_v8i16_v8i32
; CHECK-NOT: call i32 @llvm.arm.mve.vctp
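; The <8 x i1> lane mask predicates both the <8 x i16> loads and the widened
; <8 x i32> store; no VCTP intrinsic should be generated for this loop.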
define void @expand_v8i16_v8i32(i16* noalias nocapture readonly %a, i16* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
entry:
  %cmp8 = icmp eq i32 %N, 0
  %tmp8 = add i32 %N, 7
  %tmp9 = lshr i32 %tmp8, 3
  %tmp10 = shl nuw i32 %tmp9, 3
  %tmp11 = add i32 %tmp10, -8
  %tmp12 = lshr i32 %tmp11, 3
  %tmp13 = add nuw nsw i32 %tmp12, 1
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp13)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
  %tmp = getelementptr inbounds i16, i16* %a, i32 %index
  %tmp1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %tmp2 = bitcast i16* %tmp to <8 x i16>*
  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp2, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
  %tmp3 = getelementptr inbounds i16, i16* %b, i32 %index
  %tmp4 = bitcast i16* %tmp3 to <8 x i16>*
  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
  %expand.1 = zext <8 x i16> %wide.masked.load to <8 x i32>
  %expand.2 = zext <8 x i16> %wide.masked.load2 to <8 x i32>
  %mul = mul nsw <8 x i32> %expand.2, %expand.1
  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
  %tmp7 = bitcast i32* %tmp6 to <8 x i32>*
  tail call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> %mul, <8 x i32>* %tmp7, i32 4, <8 x i1> %tmp1)
  %index.next = add i32 %index, 8
  %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
  %tmp16 = icmp ne i32 %tmp15, 0
  br i1 %tmp16, label %vector.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}

; CHECK-LABEL: expand_v8i16_v4i32
; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[ELEMS_REM:%[^ ]+]], %vector.body ]
; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[ELEMS]])
; CHECK: [[ELEMS_REM]] = sub i32 [[ELEMS]], 8
; CHECK: tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
; CHECK: %store.pred = icmp ule <4 x i32> %induction.store
; CHECK: tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> {{.*}}, <4 x i32>* {{.*}}, i32 4, <4 x i1> %store.pred)
; CHECK: tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> {{.*}}, <4 x i32>* {{.*}}, i32 4, <4 x i1> %store.pred)
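; Here the <8 x i1> lane mask predicates only the <8 x i16> loads, so it can be
; rewritten as a VCTP16; the <4 x i32> stores keep their separate icmp predicate.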
define void @expand_v8i16_v4i32(i16* readonly %a, i16* readonly %b, i32* %c, i32* %d, i32 %N) {
entry:
  %cmp8 = icmp eq i32 %N, 0
  %tmp8 = add i32 %N, 7
  %tmp9 = lshr i32 %tmp8, 3
  %tmp10 = shl nuw i32 %tmp9, 3
  %tmp11 = add i32 %tmp10, -8
  %tmp12 = lshr i32 %tmp11, 3
  %tmp13 = add nuw nsw i32 %tmp12, 1
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %trip.count.minus.1 = add i32 %N, -1
  %broadcast.splatinsert10.store = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
  %broadcast.splat11.store = shufflevector <4 x i32> %broadcast.splatinsert10.store, <4 x i32> undef, <4 x i32> zeroinitializer
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp13)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %store.idx = phi i32 [ 0, %vector.ph ], [ %store.idx.next, %vector.body ]
  %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
  %tmp = getelementptr inbounds i16, i16* %a, i32 %index
  %tmp1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %tmp2 = bitcast i16* %tmp to <8 x i16>*
  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp2, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
  %tmp3 = getelementptr inbounds i16, i16* %b, i32 %index
  %tmp4 = bitcast i16* %tmp3 to <8 x i16>*
  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
  %extract.2.low = shufflevector <8 x i16> %wide.masked.load2, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %extract.2.high = shufflevector <8 x i16> %wide.masked.load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %expand.1 = zext <4 x i16> %extract.2.low to <4 x i32>
  %expand.2 = zext <4 x i16> %extract.2.high to <4 x i32>
  %mul = mul nsw <4 x i32> %expand.2, %expand.1
  %sub = mul nsw <4 x i32> %expand.1, %expand.2
  %broadcast.splatinsert.store = insertelement <4 x i32> undef, i32 %store.idx, i32 0
  %broadcast.splat.store = shufflevector <4 x i32> %broadcast.splatinsert.store, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction.store = add <4 x i32> %broadcast.splat.store, <i32 0, i32 1, i32 2, i32 3>
  %store.pred = icmp ule <4 x i32> %induction.store, %broadcast.splat11.store
  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %store.idx
  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %mul, <4 x i32>* %tmp7, i32 4, <4 x i1> %store.pred)
  %gep = getelementptr inbounds i32, i32* %d, i32 %store.idx
  %cast.gep = bitcast i32* %gep to <4 x i32>*
  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %sub, <4 x i32>* %cast.gep, i32 4, <4 x i1> %store.pred)
  %store.idx.next = add i32 %store.idx, 4
  %index.next = add i32 %index, 8
  %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
  %tmp16 = icmp ne i32 %tmp15, 0
  br i1 %tmp16, label %vector.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}

; CHECK-LABEL: expand_v4i32_v4i64
; CHECK-NOT: call i32 @llvm.arm.mve.vctp
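; Same pattern as the first test with a widened <4 x i64> store: the <4 x i1>
; lane mask feeds both the <4 x i32> loads and the <4 x i64> store, and no
; VCTP should be generated.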
define void @expand_v4i32_v4i64(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i64* noalias nocapture %c, i32 %N) {
entry:
  %cmp8 = icmp eq i32 %N, 0
  %tmp8 = add i32 %N, 3
  %tmp9 = lshr i32 %tmp8, 2
  %tmp10 = shl nuw i32 %tmp9, 2
  %tmp11 = add i32 %tmp10, -4
  %tmp12 = lshr i32 %tmp11, 2
  %tmp13 = add nuw nsw i32 %tmp12, 1
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp13)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
  %tmp1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %tmp2 = bitcast i32* %tmp to <4 x i32>*
  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
  %expand.1 = zext <4 x i32> %wide.masked.load to <4 x i64>
  %expand.2 = zext <4 x i32> %wide.masked.load2 to <4 x i64>
  %mul = mul nsw <4 x i64> %expand.2, %expand.1
  %tmp6 = getelementptr inbounds i64, i64* %c, i32 %index
  %tmp7 = bitcast i64* %tmp6 to <4 x i64>*
  tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %mul, <4 x i64>* %tmp7, i32 4, <4 x i1> %tmp1)
  %index.next = add i32 %index, 4
  %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
  %tmp16 = icmp ne i32 %tmp15, 0
  br i1 %tmp16, label %vector.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}

declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
declare void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, i32 immarg, <8 x i1>)
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32 immarg, <4 x i1>)
declare i32 @llvm.start.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)