; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs -O2 < %s | FileCheck %s

declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
declare i64 @llvm.riscv.vsetvlimax(i64, i64)
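; Note on the vsetvli/vsetvlimax intrinsic operands used throughout this file:
; the second operand encodes SEW (0=e8, 1=e16, 2=e32, 3=e64) and the third
; encodes LMUL using the vtype vlmul encoding (0=m1, 1=m2, 2=m4, 3=m8,
; 5=mf8, 6=mf4, 7=mf2). For example, (i64 2, i64 7) in @test1 below requests
; e32/mf2, which has the same SEW/LMUL ratio as the e64/m1 state needed by the
; nxv1f64 vfadd.
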
declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i64, i64)

declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  <vscale x 1 x i1>,
  i64, i64)

define <vscale x 1 x double> @test1(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 %0)
  ret <vscale x 1 x double> %1
}

define <vscale x 1 x double> @test2(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 %0)
  ret <vscale x 1 x double> %1
}

define <vscale x 1 x i64> @test3(i64 %avl, <vscale x 1 x i64> %a, <vscale x 1 x i64>* %b, <vscale x 1 x i1> %c) nounwind {
; CHECK-LABEL: test3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vle64.v v8, (a1), v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
  %1 = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
    <vscale x 1 x i64> %a,
    <vscale x 1 x i64>* %b,
    <vscale x 1 x i1> %c,
    i64 %0, i64 1)
  ret <vscale x 1 x i64> %1
}

define <vscale x 1 x i64> @test4(i64 %avl, <vscale x 1 x i64> %a, <vscale x 1 x i64>* %b, <vscale x 1 x i1> %c) nounwind {
; CHECK-LABEL: test4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vle64.v v8, (a1), v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
  %1 = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
    <vscale x 1 x i64> %a,
    <vscale x 1 x i64>* %b,
    <vscale x 1 x i1> %c,
    i64 %0, i64 1)
  ret <vscale x 1 x i64> %1
}

; Make sure we don't insert a vsetvli for the vmand instruction.
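; Mask-register logical ops such as vmand.mm only depend on vl (they operate on
; mask bits), so the state established for the preceding vmseq is sufficient.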
define <vscale x 1 x i1> @test5(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %avl) nounwind {
; CHECK-LABEL: test5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmand.mm v0, v8, v0
; CHECK-NEXT: ret
entry:
  %vl = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
  %b = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> %a, <vscale x 1 x i1> %2, i64 %vl)
  ret <vscale x 1 x i1> %b
}

declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64)
declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1>, <vscale x 1 x i1>, i64)

; Make sure we don't insert a vsetvli for the vmor instruction.
define void @test6(i32* nocapture readonly %A, i32* nocapture %B, i64 %n) {
; CHECK-LABEL: test6:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a6, a2, e32, m1, ta, ma
; CHECK-NEXT: beqz a6, .LBB5_3
; CHECK-NEXT: # %bb.1: # %for.body.preheader
; CHECK-NEXT: li a4, 0
; CHECK-NEXT: .LBB5_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: slli a3, a4, 2
; CHECK-NEXT: add a5, a0, a3
; CHECK-NEXT: vle32.v v8, (a5)
; CHECK-NEXT: vmsle.vi v9, v8, -3
; CHECK-NEXT: vmsgt.vi v10, v8, 2
; CHECK-NEXT: vmor.mm v0, v9, v10
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: vse32.v v8, (a3), v0.t
; CHECK-NEXT: add a4, a4, a6
; CHECK-NEXT: vsetvli a6, a2, e32, m1, ta, ma
; CHECK-NEXT: bnez a6, .LBB5_2
; CHECK-NEXT: .LBB5_3: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 2, i64 0)
  %cmp.not11 = icmp eq i64 %0, 0
  br i1 %cmp.not11, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  ret void

for.body: ; preds = %entry, %for.body
  %1 = phi i64 [ %8, %for.body ], [ %0, %entry ]
  %i.012 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %add.ptr = getelementptr inbounds i32, i32* %A, i64 %i.012
  %2 = bitcast i32* %add.ptr to <vscale x 2 x i32>*
  %3 = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %2, i64 %1)
  %4 = tail call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32> %3, i32 -2, i64 %1)
  %5 = tail call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32> %3, i32 2, i64 %1)
  %6 = tail call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1> %4, <vscale x 2 x i1> %5, i64 %1)
  %add.ptr1 = getelementptr inbounds i32, i32* %B, i64 %i.012
  %7 = bitcast i32* %add.ptr1 to <vscale x 2 x i32>*
  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %3, <vscale x 2 x i32>* %7, <vscale x 2 x i1> %6, i64 %1)
  %add = add i64 %1, %i.012
  %8 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 2, i64 0)
  %cmp.not = icmp eq i64 %8, 0
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define <vscale x 1 x i64> @test7(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: test7:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, ma
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
  %x = tail call i64 @llvm.riscv.vsetvlimax(i64 3, i64 0)
  %y = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(
    <vscale x 1 x i64> %a,
    i64 %b, i64 1)
  ret <vscale x 1 x i64> %y
}

define <vscale x 1 x i64> @test8(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: test8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
  %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
  %y = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> %a, i64 %b, i64 2)
  ret <vscale x 1 x i64> %y
}

define <vscale x 1 x i64> @test9(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: test9:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 9, e64, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v8, v8, v0.t
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
  %x = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %a,
    <vscale x 1 x i64> %a,
    <vscale x 1 x i64> %a,
    <vscale x 1 x i1> %mask,
    i64 9, i64 0)
  %y = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> %x, i64 %b, i64 2)
  ret <vscale x 1 x i64> %y
}

define <vscale x 1 x double> @test10(<vscale x 1 x double> %a, double %b) nounwind {
; CHECK-LABEL: test10:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
  %x = tail call i64 @llvm.riscv.vsetvlimax(i64 3, i64 0)
  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
    <vscale x 1 x double> %a, double %b, i64 1)
  ret <vscale x 1 x double> %y
}

define <vscale x 1 x double> @test11(<vscale x 1 x double> %a, double %b) nounwind {
; CHECK-LABEL: test11:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
  %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
    <vscale x 1 x double> %a, double %b, i64 2)
  ret <vscale x 1 x double> %y
}

define <vscale x 1 x double> @test12(<vscale x 1 x double> %a, double %b, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: test12:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 9, e64, m1, tu, mu
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
  %x = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %a,
    <vscale x 1 x i1> %mask,
    i64 7, i64 9, i64 0)
  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
    <vscale x 1 x double> %x, double %b, i64 2)
  ret <vscale x 1 x double> %y
}

define <vscale x 1 x double> @test13(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: test13:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 -1)
  ret <vscale x 1 x double> %0
}

define <vscale x 1 x double> @test14(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: test14:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %vsetvli = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %f1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 1)
  %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %f1,
    <vscale x 1 x double> %b,
    i64 7, i64 %vsetvli)
  ret <vscale x 1 x double> %f2
}

define <vscale x 1 x double> @test15(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: test15:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %vsetvli = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %f1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 %vsetvli)
  %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %f1,
    <vscale x 1 x double> %b,
    i64 7, i64 %vsetvli)
  ret <vscale x 1 x double> %f2
}

@gdouble = external global double

define <vscale x 1 x double> @test16(i64 %avl, double %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: test16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64, mf2, ta, ma
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vfmv.v.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v9, v8
; CHECK-NEXT: ret
entry:
  %vsetvli = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 7)
  %head = insertelement <vscale x 1 x double> poison, double %a, i32 0
  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
  %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %splat,
    <vscale x 1 x double> %b,
    i64 7, i64 %vsetvli)
  ret <vscale x 1 x double> %f2
}

define double @test17(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: test17:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa5, v8
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fadd.d fa0, fa5, fa4
; CHECK-NEXT: ret
entry:
  %vsetvli = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %c1 = extractelement <vscale x 1 x double> %a, i32 0
  %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 %vsetvli)
  %c2 = extractelement <vscale x 1 x double> %f2, i32 0
  %c3 = fadd double %c1, %c2
  ret double %c3
}

define <vscale x 1 x double> @test18(<vscale x 1 x double> %a, double %b) nounwind {
; CHECK-LABEL: test18:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 6, e64, m1, tu, ma
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v9, v8
; CHECK-NEXT: ret
entry:
  %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
    <vscale x 1 x double> %a, double %b, i64 2)
  %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %a,
    i64 7, i64 %x)
  %y2 = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
    <vscale x 1 x double> %f2, double %b, i64 1)
  %res = fadd <vscale x 1 x double> %y, %y2
  ret <vscale x 1 x double> %res
}

define <vscale x 1 x double> @test19(<vscale x 1 x double> %a, double %b) nounwind {
; CHECK-LABEL: test19:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v9, v8
; CHECK-NEXT: ret
entry:
  %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
    <vscale x 1 x double> %a, double %b, i64 2)
  %y2 = fadd <vscale x 1 x double> %y, %a
  ret <vscale x 1 x double> %y2
}

define i64 @avl_forward1(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind {
; CHECK-LABEL: avl_forward1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli a1, 6, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: ret
entry:
  %vl = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 2, i64 0)
  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
  ret i64 %vl
}

define i64 @avl_forward1b_neg(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind {
; CHECK-LABEL: avl_forward1b_neg:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli a1, 6, e16, m1, ta, ma
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: ret
entry:
  %vl = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 1, i64 0)
  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
  ret i64 %vl
}

define i64 @avl_forward2(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind {
; CHECK-LABEL: avl_forward2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: ret
entry:
  %vl = tail call i64 @llvm.riscv.vsetvlimax(i64 2, i64 0)
  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
  ret i64 %vl
}

; %vl is intentionally used only once
define void @avl_forward3(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
; CHECK-LABEL: avl_forward3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  %vl = tail call i64 @llvm.riscv.vsetvli(i64 %reg, i64 2, i64 0)
  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
  ret void
}

; %vl has multiple uses
define i64 @avl_forward3b(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
; CHECK-LABEL: avl_forward3b:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: ret
entry:
  %vl = tail call i64 @llvm.riscv.vsetvli(i64 %reg, i64 2, i64 0)
  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
  ret i64 %vl
}

; Like avl_forward3, but with an incompatible VTYPE
define void @avl_forward4(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
; CHECK-LABEL: avl_forward4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16, m1, ta, ma
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  %vl = tail call i64 @llvm.riscv.vsetvli(i64 %reg, i64 1, i64 0)
  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
  ret void
}

; Like avl_forward3b, but with an incompatible VTYPE
define i64 @avl_forward4b(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
; CHECK-LABEL: avl_forward4b:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16, m1, ta, ma
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: ret
entry:
  %vl = tail call i64 @llvm.riscv.vsetvli(i64 %reg, i64 1, i64 0)
  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
  ret i64 %vl
}

; Fault-only-first loads can modify VL.
; TODO: The vsetvli for the vadd could be removed here.
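; (vle64ff.v updates vl with the number of elements actually loaded; the IR
; below reads that count from the second result of the vleff intrinsic and
; passes it to vadd as its VL.)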
define <vscale x 1 x i64> @vleNff(i64* %str, i64 %n, i64 %x) {
; CHECK-LABEL: vleNff:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, ma
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64ff.v v8, (a0)
; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; CHECK-NEXT: vadd.vx v8, v8, a2
; CHECK-NEXT: ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 0, i64 2)
  %1 = bitcast i64* %str to <vscale x 1 x i64>*
  %2 = tail call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %1, i64 %0)
  %3 = extractvalue { <vscale x 1 x i64>, i64 } %2, 0
  %4 = extractvalue { <vscale x 1 x i64>, i64 } %2, 1
  %5 = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> %3, <vscale x 1 x i64> %3, i64 %x, i64 %4)
  ret <vscale x 1 x i64> %5
}

; Similar test case, but using the same policy for both vleff and vadd.
; Note: this test may become redundant once the TODO in @vleNff is fixed.
define <vscale x 1 x i64> @vleNff2(i64* %str, i64 %n, i64 %x) {
; CHECK-LABEL: vleNff2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, ma
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64ff.v v8, (a0)
; CHECK-NEXT: vadd.vx v8, v8, a2
; CHECK-NEXT: ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 0, i64 2)
  %1 = bitcast i64* %str to <vscale x 1 x i64>*
  %2 = tail call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %1, i64 %0)
  %3 = extractvalue { <vscale x 1 x i64>, i64 } %2, 0
  %4 = extractvalue { <vscale x 1 x i64>, i64 } %2, 1
  %5 = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %3, i64 %x, i64 %4)
  ret <vscale x 1 x i64> %5
}

declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(
  <vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64)

declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(
  <vscale x 1 x i64>, i64, i64)

; Ensure the AVL register is kept alive when forwarding an AVL immediate that
; does not fit in 5 bits.
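; (vsetivli only has a 5-bit unsigned immediate for the AVL, so an AVL of 32
; must be materialized into a register first, as the "li a1, 32" below shows.)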
define <vscale x 2 x i32> @avl_forward5(<vscale x 2 x i32>* %addr) {
; CHECK-LABEL: avl_forward5:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, ma
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: ret
  %gvl = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 0, i64 2)
  %ret = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %addr, i64 %gvl)
  ret <vscale x 2 x i32> %ret
}

declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(<vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, i64, i64)

define <vscale x 1 x double> @test20(i64 %avl, <vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x double> %c) nounwind {
; CHECK-LABEL: test20:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfwadd.vv v11, v8, v9
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v11, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
    <vscale x 1 x double> undef,
    <vscale x 1 x float> %a,
    <vscale x 1 x float> %b,
    i64 7, i64 %0)
  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %c,
    i64 7, i64 %0)
  ret <vscale x 1 x double> %2
}

; This used to fail the machine verifier due to the vsetvli being removed
; while the add was still using it.
define i64 @bad_removal(<2 x i64> %arg) {
; CHECK-LABEL: bad_removal:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: vsetivli a0, 16, e64, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v8
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
bb:
  %tmp = extractelement <2 x i64> %arg, i64 0
  %tmp1 = call i64 @llvm.riscv.vsetvli.i64(i64 16, i64 3, i64 0)
  %tmp2 = add i64 %tmp, %tmp1
  ret i64 %tmp2
}

define void @add_v128i8(ptr %x, ptr %y) vscale_range(2,2) {
; CHECK-LABEL: add_v128i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8r.v v8, (a0)
; CHECK-NEXT: vl8r.v v16, (a1)
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
  %a = load <128 x i8>, ptr %x
  %b = load <128 x i8>, ptr %y
  %c = add <128 x i8> %a, %b
  store <128 x i8> %c, ptr %x
  ret void
}

define void @add_v16i64(ptr %x, ptr %y) vscale_range(2,2) {
; CHECK-LABEL: add_v16i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: vl8re64.v v16, (a1)
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
  %a = load <16 x i64>, ptr %x
  %b = load <16 x i64>, ptr %y
  %c = add <16 x i64> %a, %b
  store <16 x i64> %c, ptr %x
  ret void
}

define <vscale x 2 x float> @fp_reduction_vfmv_s_f(float %0, <vscale x 8 x float> %1, i64 %2) {
; CHECK-LABEL: fp_reduction_vfmv_s_f:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vfredusum.vs v8, v8, v12
; CHECK-NEXT: ret
  %4 = tail call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> poison, float %0, i64 %2)
  %5 = tail call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> %4, i64 0)
  %6 = tail call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> %1, <vscale x 2 x float> %5, i64 7, i64 %2)
  ret <vscale x 2 x float> %6
}

define dso_local <vscale x 2 x i32> @int_reduction_vmv_s_x(i32 signext %0, <vscale x 8 x i32> %1, i64 %2) {
; CHECK-LABEL: int_reduction_vmv_s_x:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vredsum.vs v8, v8, v12
; CHECK-NEXT: ret
  %4 = tail call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 %0, i64 %2)
  %5 = tail call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %4, i64 0)
  %6 = tail call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> %1, <vscale x 2 x i32> %5, i64 %2)
  ret <vscale x 2 x i32> %6
}

declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float>, float, i64)
declare <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float>, i64)
declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float>, <vscale x 8 x float>, <vscale x 2 x float>, i64, i64)

declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32>, i32, i64) #1
declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32>, i64 immarg) #2
declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32>, <vscale x 8 x i32>, <vscale x 2 x i32>, i64) #1

declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64, i64)

declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64, i64)

declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i64, i64, i64)

declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(
  <vscale x 1 x i64>,
  i64, i64)

declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
  <vscale x 1 x double>,
  double, i64)

declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>* nocapture, i64)
declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)
declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)
declare <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1>, <vscale x 2 x i1>, i64)
declare void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>* nocapture, <vscale x 2 x i1>, i64)
declare void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>* nocapture, i64)