; RUN: opt -mtriple=thumbv8m.main -mcpu=cortex-m33 -arm-parallel-dsp %s -S -o - | FileCheck %s
; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
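; The -arm-parallel-dsp pass should combine each run of sign-extending i16
; loads, widening multiplies and i64 accumulates below into calls to the
; exchanging multiply-accumulate intrinsic llvm.arm.smlaldx. On targets
; without the DSP extension (cortex-m0, or cortex-m33 with -dsp) no DSP
; intrinsic may be introduced.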
define i64 @smlaldx(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {

; CHECK-LABEL: smlaldx
; CHECK: = phi i32 [ 0, %for.body.preheader.new ],
; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
; CHECK: [[PIN21:%[^ ]+]] = bitcast i16* %pIn2.1 to i32*
; CHECK: [[IN21:%[^ ]+]] = load i32, i32* [[PIN21]], align 2
; CHECK: [[PIN10:%[^ ]+]] = bitcast i16* %pIn1.0 to i32*
; CHECK: [[IN10:%[^ ]+]] = load i32, i32* [[PIN10]], align 2
; CHECK: [[PIN23:%[^ ]+]] = bitcast i16* %pIn2.3 to i32*
; CHECK: [[IN23:%[^ ]+]] = load i32, i32* [[PIN23]], align 2
; CHECK: [[PIN12:%[^ ]+]] = bitcast i16* %pIn1.2 to i32*
; CHECK: [[IN12:%[^ ]+]] = load i32, i32* [[PIN12]], align 2
; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN23]], i32 [[IN12]], i64 [[ACC0]])
; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN21]], i32 [[IN10]], i64 [[ACC1]])
; CHECK-NOT: call i64 @llvm.arm.smlad
; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlad
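; The loop below has been unrolled by four: %pIn1 is indexed forwards from
; zero while %pIn2 steps backwards, so each pair of adjacent i16 loads can be
; widened into a single i32 load feeding smlaldx, which exchanges the halves
; of its second operand before multiplying.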
entry:
  %cmp9 = icmp eq i32 %limit, 0
  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:
  %0 = add i32 %limit, -1
  %xtraiter = and i32 %limit, 3
  %1 = icmp ult i32 %0, 3
  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new

for.body.preheader.new:
  %unroll_iter = sub i32 %limit, %xtraiter
  br label %for.body

for.cond.cleanup.loopexit.unr-lcssa:
  %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
  %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
  %lcmp.mod = icmp eq i32 %xtraiter, 0
  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
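; Scalar epilogue covering the remaining (%limit & 3) iterations, one
; sign-extended multiply per iteration.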
for.body.epil:
  %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
  %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
  %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
  %sub.epil = sub i32 %j, %i.011.epil
  %arrayidx.epil = getelementptr inbounds i16, i16* %pIn2, i32 %sub.epil
  %2 = load i16, i16* %arrayidx.epil, align 2
  %conv.epil = sext i16 %2 to i32
  %arrayidx1.epil = getelementptr inbounds i16, i16* %pIn1, i32 %i.011.epil
  %3 = load i16, i16* %arrayidx1.epil, align 2
  %conv2.epil = sext i16 %3 to i32
  %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
  %sext.mul.epil = sext i32 %mul.epil to i64
  %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
  %inc.epil = add nuw i32 %i.011.epil, 1
  %epil.iter.sub = add i32 %epil.iter, -1
  %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
for.cond.cleanup:
  %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
  ret i64 %sum.0.lcssa

for.body:
  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
  %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
  %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
  %pIn2Base = phi i16* [ %pIn2, %for.body.preheader.new ], [ %pIn2.4, %for.body ]
  %pIn2.0 = getelementptr inbounds i16, i16* %pIn2Base, i32 0
  %In2 = load i16, i16* %pIn2.0, align 2
  %pIn1.0 = getelementptr inbounds i16, i16* %pIn1, i32 %i.011
  %In1 = load i16, i16* %pIn1.0, align 2
  %inc = or i32 %i.011, 1
  %pIn2.1 = getelementptr inbounds i16, i16* %pIn2Base, i32 -1
  %In2.1 = load i16, i16* %pIn2.1, align 2
  %pIn1.1 = getelementptr inbounds i16, i16* %pIn1, i32 %inc
  %In1.1 = load i16, i16* %pIn1.1, align 2
  %inc.1 = or i32 %i.011, 2
  %pIn2.2 = getelementptr inbounds i16, i16* %pIn2Base, i32 -2
  %In2.2 = load i16, i16* %pIn2.2, align 2
  %pIn1.2 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.1
  %In1.2 = load i16, i16* %pIn1.2, align 2
  %inc.2 = or i32 %i.011, 3
  %pIn2.3 = getelementptr inbounds i16, i16* %pIn2Base, i32 -3
  %In2.3 = load i16, i16* %pIn2.3, align 2
  %pIn1.3 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.2
  %In1.3 = load i16, i16* %pIn1.3, align 2
  %sextIn1 = sext i16 %In1 to i32
  %sextIn1.1 = sext i16 %In1.1 to i32
  %sextIn1.2 = sext i16 %In1.2 to i32
  %sextIn1.3 = sext i16 %In1.3 to i32
  %sextIn2 = sext i16 %In2 to i32
  %sextIn2.1 = sext i16 %In2.1 to i32
  %sextIn2.2 = sext i16 %In2.2 to i32
  %sextIn2.3 = sext i16 %In2.3 to i32
  %mul = mul nsw i32 %sextIn1, %sextIn2
  %mul.1 = mul nsw i32 %sextIn1.1, %sextIn2.1
  %mul.2 = mul nsw i32 %sextIn1.2, %sextIn2.2
  %mul.3 = mul nsw i32 %sextIn1.3, %sextIn2.3
  %sext.mul = sext i32 %mul to i64
  %sext.mul.1 = sext i32 %mul.1 to i64
  %sext.mul.2 = sext i32 %mul.2 to i64
  %sext.mul.3 = sext i32 %mul.3 to i64
  %add = add nsw i64 %sum.010, %sext.mul
  %add.1 = add nsw i64 %sext.mul.1, %add
  %add.2 = add nsw i64 %add.1, %sext.mul.2
  %add.3 = add nsw i64 %sext.mul.3, %add.2
  %inc.3 = add i32 %i.011, 4
  %pIn2.4 = getelementptr inbounds i16, i16* %pIn2Base, i32 -4
  %niter.nsub.3 = add i32 %niter, -4
  %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
}
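; Same reduction as above, but with the operands of each multiply commuted
; (%sextIn2 * %sextIn1) and the loop rewritten to use pointer induction
; variables; the pass should still pair the loads and emit smlaldx.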
define i64 @smlaldx_swap(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {

entry:
  %cmp9 = icmp eq i32 %limit, 0
  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:
  %0 = add i32 %limit, -1
  %xtraiter = and i32 %limit, 3
  %1 = icmp ult i32 %0, 3
  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new

for.body.preheader.new:
  %unroll_iter = sub i32 %limit, %xtraiter
  %scevgep6 = getelementptr i16, i16* %pIn1, i32 2
  %2 = add i32 %j, -1
  %scevgep11 = getelementptr i16, i16* %pIn2, i32 %2
  br label %for.body

for.cond.cleanup.loopexit.unr-lcssa:
  %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
  %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
  %lcmp.mod = icmp eq i32 %xtraiter, 0
  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil.preheader
for.body.epil.preheader:
  %scevgep = getelementptr i16, i16* %pIn1, i32 %i.011.unr
  %3 = sub i32 %j, %i.011.unr
  %scevgep2 = getelementptr i16, i16* %pIn2, i32 %3
  %4 = sub i32 0, %xtraiter
  br label %for.body.epil
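; This scalar epilogue appears to have been processed by LSR (note the
; %lsr.iv names): its counter runs from -(%limit & 3) up to zero while the
; two pointers step in opposite directions.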
for.body.epil:
  %lsr.iv5 = phi i32 [ %4, %for.body.epil.preheader ], [ %lsr.iv.next, %for.body.epil ]
  %lsr.iv3 = phi i16* [ %scevgep2, %for.body.epil.preheader ], [ %scevgep4, %for.body.epil ]
  %lsr.iv = phi i16* [ %scevgep, %for.body.epil.preheader ], [ %scevgep1, %for.body.epil ]
  %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.body.epil.preheader ]
  %5 = load i16, i16* %lsr.iv3, align 2
  %conv.epil = sext i16 %5 to i32
  %6 = load i16, i16* %lsr.iv, align 2
  %conv2.epil = sext i16 %6 to i32
  %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
  %sext.mul.epil = sext i32 %mul.epil to i64
  %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
  %scevgep1 = getelementptr i16, i16* %lsr.iv, i32 1
  %scevgep4 = getelementptr i16, i16* %lsr.iv3, i32 -1
  %lsr.iv.next = add nsw i32 %lsr.iv5, 1
  %epil.iter.cmp = icmp eq i32 %lsr.iv.next, 0
  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
for.cond.cleanup:
  %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
  ret i64 %sum.0.lcssa

; CHECK-LABEL: smlaldx_swap
; CHECK: for.body.preheader.new:
; CHECK: [[PIN1Base:[^ ]+]] = getelementptr i16, i16* %pIn1
; CHECK: [[PIN2Base:[^ ]+]] = getelementptr i16, i16* %pIn2

; CHECK: for.body:
; CHECK: [[PIN2:%[^ ]+]] = phi i16* [ [[PIN2_NEXT:%[^ ]+]], %for.body ], [ [[PIN2Base]], %for.body.preheader.new ]
; CHECK: [[PIN1:%[^ ]+]] = phi i16* [ [[PIN1_NEXT:%[^ ]+]], %for.body ], [ [[PIN1Base]], %for.body.preheader.new ]
; CHECK: [[IV:%[^ ]+]] = phi i32
; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]

; CHECK: [[PIN2_CAST:%[^ ]+]] = bitcast i16* [[PIN2]] to i32*
; CHECK: [[IN2:%[^ ]+]] = load i32, i32* [[PIN2_CAST]], align 2

; CHECK: [[PIN1_2:%[^ ]+]] = getelementptr i16, i16* [[PIN1]], i32 -2
; CHECK: [[PIN1_2_CAST:%[^ ]+]] = bitcast i16* [[PIN1_2]] to i32*
; CHECK: [[IN1_2:%[^ ]+]] = load i32, i32* [[PIN1_2_CAST]], align 2

; CHECK: [[PIN2_2:%[^ ]+]] = getelementptr i16, i16* [[PIN2]], i32 -2
; CHECK: [[PIN2_2_CAST:%[^ ]+]] = bitcast i16* [[PIN2_2]] to i32*
; CHECK: [[IN2_2:%[^ ]+]] = load i32, i32* [[PIN2_2_CAST]], align 2

; CHECK: [[PIN1_CAST:%[^ ]+]] = bitcast i16* [[PIN1]] to i32*
; CHECK: [[IN1:%[^ ]+]] = load i32, i32* [[PIN1_CAST]], align 2

; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN2_2]], i32 [[IN1]], i64 [[ACC0]])
; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN2]], i32 [[IN1_2]], i64 [[ACC1]])

; CHECK: [[PIN1_NEXT]] = getelementptr i16, i16* [[PIN1]], i32 4
; CHECK: [[PIN2_NEXT]] = getelementptr i16, i16* [[PIN2]], i32 -4

; CHECK-NOT: call i64 @llvm.arm.smlad
; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlad
for.body:
  %pin2 = phi i16* [ %pin2.sub4, %for.body ], [ %scevgep11, %for.body.preheader.new ]
  %pin1 = phi i16* [ %pin1.add4, %for.body ], [ %scevgep6, %for.body.preheader.new ]
  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
  %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
  %pin2.add1 = getelementptr i16, i16* %pin2, i32 1
  %In2 = load i16, i16* %pin2.add1, align 2
  %pin1.sub2 = getelementptr i16, i16* %pin1, i32 -2
  %In1 = load i16, i16* %pin1.sub2, align 2
  %In2.1 = load i16, i16* %pin2, align 2
  %pin1.sub1 = getelementptr i16, i16* %pin1, i32 -1
  %In1.1 = load i16, i16* %pin1.sub1, align 2
  %pin2.sub1 = getelementptr i16, i16* %pin2, i32 -1
  %In2.2 = load i16, i16* %pin2.sub1, align 2
  %In1.2 = load i16, i16* %pin1, align 2
  %pin2.sub2 = getelementptr i16, i16* %pin2, i32 -2
  %In2.3 = load i16, i16* %pin2.sub2, align 2
  %pin1.add1 = getelementptr i16, i16* %pin1, i32 1
  %In1.3 = load i16, i16* %pin1.add1, align 2
  %sextIn2 = sext i16 %In2 to i32
  %sextIn1 = sext i16 %In1 to i32
  %sextIn2.1 = sext i16 %In2.1 to i32
  %sextIn1.1 = sext i16 %In1.1 to i32
  %sextIn2.2 = sext i16 %In2.2 to i32
  %sextIn1.2 = sext i16 %In1.2 to i32
  %sextIn2.3 = sext i16 %In2.3 to i32
  %sextIn1.3 = sext i16 %In1.3 to i32
  %mul = mul nsw i32 %sextIn2, %sextIn1
  %sext.mul = sext i32 %mul to i64
  %add = add nsw i64 %sext.mul, %sum.010
  %mul.1 = mul nsw i32 %sextIn2.1, %sextIn1.1
  %sext.mul.1 = sext i32 %mul.1 to i64
  %add.1 = add nsw i64 %sext.mul.1, %add
  %mul.2 = mul nsw i32 %sextIn2.2, %sextIn1.2
  %sext.mul.2 = sext i32 %mul.2 to i64
  %add.2 = add nsw i64 %add.1, %sext.mul.2
  %mul.3 = mul nsw i32 %sextIn2.3, %sextIn1.3
  %sext.mul.3 = sext i32 %mul.3 to i64
  %add.3 = add nsw i64 %add.2, %sext.mul.3
  %inc.3 = add i32 %i.011, 4
  %pin1.add4 = getelementptr i16, i16* %pin1, i32 4
  %pin2.sub4 = getelementptr i16, i16* %pin2, i32 -4
  %niter.ncmp.3 = icmp eq i32 %unroll_iter, %inc.3
  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
}