// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
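
// Each test below exercises the overloaded __riscv_vmsgt_mu intrinsic
// (mask-undisturbed policy) and checks that it lowers to the matching
// llvm.riscv.vmsgt.mask.* intrinsic, for both vector-vector (vv) and
// vector-scalar (vx) operands across all signed element widths and LMULs.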
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgt_vv_i8mf8_b64_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgt_vx_i8mf8_b64_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgt_vv_i8mf4_b32_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgt_vx_i8mf4_b32_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgt_vv_i8mf2_b16_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgt_vx_i8mf2_b16_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgt_vv_i8m1_b8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgt_vx_i8m1_b8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgt_vv_i8m2_b4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgt_vx_i8m2_b4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgt_vv_i8m4_b2_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgt_vx_i8m4_b2_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i1> @test_vmsgt_vv_i8m8_b1_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i1> @test_vmsgt_vx_i8m8_b1_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgt_vv_i16mf4_b64_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgt_vx_i16mf4_b64_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgt_vv_i16mf2_b32_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgt_vx_i16mf2_b32_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgt_vv_i16m1_b16_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgt_vx_i16m1_b16_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgt_vv_i16m2_b8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgt_vx_i16m2_b8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgt_vv_i16m4_b4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgt_vx_i16m4_b4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgt_vv_i16m8_b2_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgt_vx_i16m8_b2_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgt_vv_i32mf2_b64_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgt_vx_i32mf2_b64_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgt_vv_i32m1_b32_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgt_vx_i32m1_b32_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgt_vv_i32m2_b16_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgt_vx_i32m2_b16_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgt_vv_i32m4_b8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgt_vx_i32m4_b8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgt_vv_i32m8_b4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgt_vx_i32m8_b4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgt_vv_i64m1_b64_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgt_vx_i64m1_b64_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgt_vv_i64m2_b32_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgt_vx_i64m2_b32_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgt_vv_i64m4_b16_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgt_vx_i64m4_b16_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgt_vv_i64m8_b8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgt_vx_i64m8_b8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
  return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}