// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
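
// __riscv_vneg has no dedicated IR intrinsic: vneg.v vd, vs is the RVV
// pseudoinstruction for vrsub.vx vd, vs, x0 (i.e. 0 - vs), so every assertion
// below matches @llvm.riscv.vrsub.* with a zero scalar operand.
// Policy suffixes, as reflected in the masked intrinsic's trailing policy
// operand (bit 0 = tail agnostic, bit 1 = mask agnostic):
//   _tu    unmasked form; maskedoff supplies the tail (passthru) elements
//   _tum   policy 2: tail undisturbed, mask agnostic
//   _tumu  policy 0: tail undisturbed, mask undisturbed
//   _mu    policy 1: tail agnostic, mask undisturbed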
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vneg_v_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vneg_v_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vneg_v_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vneg_v_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vneg_v_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vneg_v_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vneg_v_i8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vneg_v_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vneg_v_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vneg_v_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vneg_v_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vneg_v_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vneg_v_i16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vneg_v_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vneg_v_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vneg_v_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vneg_v_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vneg_v_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vneg_v_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vneg_v_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vneg_v_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vneg_v_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
  return __riscv_vneg_tu(maskedoff, op1, vl);
}
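
// _tum variants: a vboolN_t mask leads the argument list, and the masked
// vrsub intrinsic carries policy operand 2 (tail undisturbed, mask agnostic).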
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vneg_v_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vneg_v_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vneg_v_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vneg_v_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vneg_v_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 0, <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vneg_v_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 0, <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vneg_v_i8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 0, <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vneg_v_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vneg_v_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vneg_v_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vneg_v_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vneg_v_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 0, <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vneg_v_i16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 0, <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vneg_v_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vneg_v_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vneg_v_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vneg_v_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vneg_v_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 0, <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vneg_v_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vneg_v_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vneg_v_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vneg_v_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
  return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
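
// _tumu variants: as _tum, but the masked vrsub intrinsic is emitted with
// policy operand 0 (tail undisturbed, mask undisturbed).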
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vneg_v_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vneg_v_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vneg_v_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vneg_v_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vneg_v_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 0, <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vneg_v_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 0, <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vneg_v_i8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 0, <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vneg_v_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vneg_v_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vneg_v_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vneg_v_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vneg_v_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 0, <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vneg_v_i16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 0, <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vneg_v_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vneg_v_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vneg_v_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vneg_v_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vneg_v_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 0, <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vneg_v_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vneg_v_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vneg_v_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vneg_v_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
  return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
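
// _mu variants: as _tum, but the masked vrsub intrinsic is emitted with
// policy operand 1 (tail agnostic, mask undisturbed).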
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vneg_v_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vneg_v_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vneg_v_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vneg_v_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vneg_v_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 0, <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vneg_v_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 0, <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vneg_v_i8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 0, <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vneg_v_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vneg_v_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vneg_v_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vneg_v_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vneg_v_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 0, <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vneg_v_i16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 0, <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vneg_v_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vneg_v_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vneg_v_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vneg_v_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vneg_v_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 0, <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vneg_v_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 0, <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vneg_v_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 0, <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vneg_v_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 0, <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vneg_v_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 0, <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
  return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}