; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
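
; vmxor.mm computes the bitwise XOR of two mask registers. Each test below
; exercises a different scalable mask type (nxv1i1 through nxv64i1), so the
; emitted vsetvli steps through the LMUL settings mf8, mf4, mf2, m1, m2, m4,
; and m8 at SEW=e8.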
declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i1> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i1> %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    iXLen %2)

  ret <vscale x 64 x i1> %a
}