; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbc \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbc \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
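
; Codegen tests for the Zvbc vclmulh (vector carry-less multiply, high half)
; intrinsics: .vv and .vx forms, unmasked and masked, for nxv1i64 through
; nxv8i64 (e64, m1 through m8).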
declare <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmulh_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vclmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vclmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmulh_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vclmulh.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vclmulh.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmulh_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vclmulh.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vclmulh.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmulh_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vclmulh.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vclmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

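; For the .vx forms below, RV32 has no 64-bit GPRs, so the i64 scalar is
; stored to the stack and splatted with vlse64.v, and the .vv form of the
; instruction is selected; RV64 keeps the scalar in a0 and uses vclmulh.vx.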
declare <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmulh_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vclmulh.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v9, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vclmulh.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmulh_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vclmulh.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vx_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v10, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vclmulh.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmulh_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vclmulh.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vx_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v12, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vclmulh.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmulh_vx_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vclmulh.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vx_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v16, v24, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vclmulh.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}