// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +zvbb \
// RUN:   -target-feature +zvbc \
// RUN:   -target-feature +zvkb \
// RUN:   -target-feature +zvkg \
// RUN:   -target-feature +zvkned \
// RUN:   -target-feature +zvknhb \
// RUN:   -target-feature +zvksed \
// RUN:   -target-feature +zvksh \
// RUN:   -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
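
// The intrinsics exercised below map to the Zvbb/Zvkb vandn.vv and vandn.vx
// instructions, which compute a bitwise and-not: for the .vv form
// vd[i] = vs2[i] & ~vs1[i], and for the .vx form vd[i] = vs2[i] & ~rs1.
// The _tu suffix selects the tail-undisturbed policy, so elements past vl
// are taken from the maskedoff (passthru) operand rather than left agnostic.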

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vandn_vv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vandn_vx_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i8 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vandn_vv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vandn_vx_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i8 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vandn_vv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vandn_vx_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i8 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vandn_vv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vandn_vx_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i8 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vandn_vv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vandn_vx_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i8 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vandn_vv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vandn_vx_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i8 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vandn_vv_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vandn_vx_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i8 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vandn_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vandn_vx_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i16 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vandn_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vandn_vx_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i16 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vandn_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vandn_vx_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i16 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vandn_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vandn_vx_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i16 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vandn_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vandn_vx_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i16 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vandn_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vandn_vx_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i16 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vandn_vv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vandn_vx_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i32 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vandn_vv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vandn_vx_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i32 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vandn_vv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vandn_vx_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i32 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vandn_vv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vandn_vx_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i32 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vandn_vv_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vandn_vx_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i32 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vandn_vv_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vandn_vx_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vandn_vv_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vandn_vx_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vandn_vv_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vandn_vx_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vandn_vv_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vandn_vx_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[RS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_tu(maskedoff, vs2, rs1, vl);
}
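
// The remaining tests cover the masked _tum variants: the mask comes first in
// the C signature, and codegen switches to the .mask intrinsic with the mask
// appended after the sources plus a trailing policy operand. The i64 2 seen in
// the checks encodes tail-undisturbed, mask-agnostic, which matches the policy
// the _tum suffix requests under the RVV intrinsic policy convention.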

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vandn_vv_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vandn_vx_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i8 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vandn_vv_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vandn_vx_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i8 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vandn_vv_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vandn_vx_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i8 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vandn_vv_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vandn_vx_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i8 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vandn_vv_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vandn_vx_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i8 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vandn_vv_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vandn_vx_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i8 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vandn_vv_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vandn_vx_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i8 [[RS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vandn_vv_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vandn_vx_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i16 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vandn_vv_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vandn_vx_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i16 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vandn_vv_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vandn_vx_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i16 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vandn_vv_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vandn_vx_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i16 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vandn_vv_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vandn_vx_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i16 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vandn_vv_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vandn_vx_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i16 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vandn_vv_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vandn_vx_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vandn_vv_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vandn_vx_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vandn_vv_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vandn_vx_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vandn_vv_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vandn_vx_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl);
}
798 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vandn_vv_u32m8_tum
799 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
800 // CHECK-RV64-NEXT: entry:
801 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
802 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
804 vuint32m8_t
test_vandn_vv_u32m8_tum(vbool4_t mask
, vuint32m8_t maskedoff
, vuint32m8_t vs2
, vuint32m8_t vs1
, size_t vl
) {
805 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, vs1
, vl
);
808 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vandn_vx_u32m8_tum
809 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
810 // CHECK-RV64-NEXT: entry:
811 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
812 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
814 vuint32m8_t
test_vandn_vx_u32m8_tum(vbool4_t mask
, vuint32m8_t maskedoff
, vuint32m8_t vs2
, uint32_t rs1
, size_t vl
) {
815 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, rs1
, vl
);
818 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vandn_vv_u64m1_tum
819 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
820 // CHECK-RV64-NEXT: entry:
821 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
822 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
824 vuint64m1_t
test_vandn_vv_u64m1_tum(vbool64_t mask
, vuint64m1_t maskedoff
, vuint64m1_t vs2
, vuint64m1_t vs1
, size_t vl
) {
825 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, vs1
, vl
);
828 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vandn_vx_u64m1_tum
829 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
830 // CHECK-RV64-NEXT: entry:
831 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
832 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
834 vuint64m1_t
test_vandn_vx_u64m1_tum(vbool64_t mask
, vuint64m1_t maskedoff
, vuint64m1_t vs2
, uint64_t rs1
, size_t vl
) {
835 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, rs1
, vl
);
838 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vandn_vv_u64m2_tum
839 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
840 // CHECK-RV64-NEXT: entry:
841 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
842 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
844 vuint64m2_t
test_vandn_vv_u64m2_tum(vbool32_t mask
, vuint64m2_t maskedoff
, vuint64m2_t vs2
, vuint64m2_t vs1
, size_t vl
) {
845 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, vs1
, vl
);
848 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vandn_vx_u64m2_tum
849 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
850 // CHECK-RV64-NEXT: entry:
851 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
852 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
854 vuint64m2_t
test_vandn_vx_u64m2_tum(vbool32_t mask
, vuint64m2_t maskedoff
, vuint64m2_t vs2
, uint64_t rs1
, size_t vl
) {
855 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, rs1
, vl
);
858 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vandn_vv_u64m4_tum
859 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
860 // CHECK-RV64-NEXT: entry:
861 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
862 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
864 vuint64m4_t
test_vandn_vv_u64m4_tum(vbool16_t mask
, vuint64m4_t maskedoff
, vuint64m4_t vs2
, vuint64m4_t vs1
, size_t vl
) {
865 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, vs1
, vl
);
868 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vandn_vx_u64m4_tum
869 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
870 // CHECK-RV64-NEXT: entry:
871 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
872 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
874 vuint64m4_t
test_vandn_vx_u64m4_tum(vbool16_t mask
, vuint64m4_t maskedoff
, vuint64m4_t vs2
, uint64_t rs1
, size_t vl
) {
875 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, rs1
, vl
);
878 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vandn_vv_u64m8_tum
879 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
880 // CHECK-RV64-NEXT: entry:
881 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
882 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
884 vuint64m8_t
test_vandn_vv_u64m8_tum(vbool8_t mask
, vuint64m8_t maskedoff
, vuint64m8_t vs2
, vuint64m8_t vs1
, size_t vl
) {
885 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, vs1
, vl
);
888 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vandn_vx_u64m8_tum
889 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
890 // CHECK-RV64-NEXT: entry:
891 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
892 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
894 vuint64m8_t
test_vandn_vx_u64m8_tum(vbool8_t mask
, vuint64m8_t maskedoff
, vuint64m8_t vs2
, uint64_t rs1
, size_t vl
) {
895 return __riscv_vandn_tum(mask
, maskedoff
, vs2
, rs1
, vl
);
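
// The _tumu ("tail undisturbed, mask undisturbed") tests below pass a
// trailing policy operand of `i64 0` to the masked intrinsic, so both tail
// elements and masked-off elements keep their values from `maskedoff`.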
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vandn_vv_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vandn_vx_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i8 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vandn_vv_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vandn_vx_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i8 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vandn_vv_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vandn_vx_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i8 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vandn_vv_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vandn_vx_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i8 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vandn_vv_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vandn_vx_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i8 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vandn_vv_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vandn_vx_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i8 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vandn_vv_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vandn_vx_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i8 [[RS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vandn_vv_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vandn_vx_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i16 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vandn_vv_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vandn_vx_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i16 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vandn_vv_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vandn_vx_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i16 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vandn_vv_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vandn_vx_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i16 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vandn_vv_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vandn_vx_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i16 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vandn_vv_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vandn_vx_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i16 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vandn_vv_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vandn_vx_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vandn_vv_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vandn_vx_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vandn_vv_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vandn_vx_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vandn_vv_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vandn_vx_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vandn_vv_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vandn_vx_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vandn_vv_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vandn_vx_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vandn_vv_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vandn_vx_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vandn_vv_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vandn_vx_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vandn_vv_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vandn_vx_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl);
}
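
// The _mu ("mask undisturbed") tests below use policy operand `i64 1`
// (tail agnostic, mask undisturbed): masked-off elements keep their
// `maskedoff` values while tail elements are left agnostic.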
1338 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vandn_vv_u8mf8_mu
1339 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1340 // CHECK-RV64-NEXT: entry:
1341 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
1342 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
1344 vuint8mf8_t
test_vandn_vv_u8mf8_mu(vbool64_t mask
, vuint8mf8_t maskedoff
, vuint8mf8_t vs2
, vuint8mf8_t vs1
, size_t vl
) {
1345 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, vs1
, vl
);
1348 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vandn_vx_u8mf8_mu
1349 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1350 // CHECK-RV64-NEXT: entry:
1351 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i8 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
1352 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
1354 vuint8mf8_t
test_vandn_vx_u8mf8_mu(vbool64_t mask
, vuint8mf8_t maskedoff
, vuint8mf8_t vs2
, uint8_t rs1
, size_t vl
) {
1355 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, rs1
, vl
);
1358 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vandn_vv_u8mf4_mu
1359 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1360 // CHECK-RV64-NEXT: entry:
1361 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
1362 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
1364 vuint8mf4_t
test_vandn_vv_u8mf4_mu(vbool32_t mask
, vuint8mf4_t maskedoff
, vuint8mf4_t vs2
, vuint8mf4_t vs1
, size_t vl
) {
1365 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, vs1
, vl
);
1368 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vandn_vx_u8mf4_mu
1369 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1370 // CHECK-RV64-NEXT: entry:
1371 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i8 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
1372 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
1374 vuint8mf4_t
test_vandn_vx_u8mf4_mu(vbool32_t mask
, vuint8mf4_t maskedoff
, vuint8mf4_t vs2
, uint8_t rs1
, size_t vl
) {
1375 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, rs1
, vl
);
1378 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vandn_vv_u8mf2_mu
1379 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1380 // CHECK-RV64-NEXT: entry:
1381 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
1382 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
1384 vuint8mf2_t
test_vandn_vv_u8mf2_mu(vbool16_t mask
, vuint8mf2_t maskedoff
, vuint8mf2_t vs2
, vuint8mf2_t vs1
, size_t vl
) {
1385 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, vs1
, vl
);
1388 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vandn_vx_u8mf2_mu
1389 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1390 // CHECK-RV64-NEXT: entry:
1391 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i8 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
1392 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
1394 vuint8mf2_t
test_vandn_vx_u8mf2_mu(vbool16_t mask
, vuint8mf2_t maskedoff
, vuint8mf2_t vs2
, uint8_t rs1
, size_t vl
) {
1395 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, rs1
, vl
);
1398 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vandn_vv_u8m1_mu
1399 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1400 // CHECK-RV64-NEXT: entry:
1401 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
1402 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1404 vuint8m1_t
test_vandn_vv_u8m1_mu(vbool8_t mask
, vuint8m1_t maskedoff
, vuint8m1_t vs2
, vuint8m1_t vs1
, size_t vl
) {
1405 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, vs1
, vl
);
1408 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vandn_vx_u8m1_mu
1409 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1410 // CHECK-RV64-NEXT: entry:
1411 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i8 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
1412 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1414 vuint8m1_t
test_vandn_vx_u8m1_mu(vbool8_t mask
, vuint8m1_t maskedoff
, vuint8m1_t vs2
, uint8_t rs1
, size_t vl
) {
1415 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, rs1
, vl
);
1418 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vandn_vv_u8m2_mu
1419 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1420 // CHECK-RV64-NEXT: entry:
1421 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
1422 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1424 vuint8m2_t
test_vandn_vv_u8m2_mu(vbool4_t mask
, vuint8m2_t maskedoff
, vuint8m2_t vs2
, vuint8m2_t vs1
, size_t vl
) {
1425 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, vs1
, vl
);
1428 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vandn_vx_u8m2_mu
1429 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1430 // CHECK-RV64-NEXT: entry:
1431 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i8 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
1432 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1434 vuint8m2_t
test_vandn_vx_u8m2_mu(vbool4_t mask
, vuint8m2_t maskedoff
, vuint8m2_t vs2
, uint8_t rs1
, size_t vl
) {
1435 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, rs1
, vl
);
1438 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vandn_vv_u8m4_mu
1439 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1440 // CHECK-RV64-NEXT: entry:
1441 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
1442 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1444 vuint8m4_t
test_vandn_vv_u8m4_mu(vbool2_t mask
, vuint8m4_t maskedoff
, vuint8m4_t vs2
, vuint8m4_t vs1
, size_t vl
) {
1445 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, vs1
, vl
);
1448 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vandn_vx_u8m4_mu
1449 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1450 // CHECK-RV64-NEXT: entry:
1451 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i8 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
1452 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1454 vuint8m4_t
test_vandn_vx_u8m4_mu(vbool2_t mask
, vuint8m4_t maskedoff
, vuint8m4_t vs2
, uint8_t rs1
, size_t vl
) {
1455 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, rs1
, vl
);
1458 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vandn_vv_u8m8_mu
1459 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1460 // CHECK-RV64-NEXT: entry:
1461 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
1462 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1464 vuint8m8_t
test_vandn_vv_u8m8_mu(vbool1_t mask
, vuint8m8_t maskedoff
, vuint8m8_t vs2
, vuint8m8_t vs1
, size_t vl
) {
1465 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, vs1
, vl
);
1468 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vandn_vx_u8m8_mu
1469 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1470 // CHECK-RV64-NEXT: entry:
1471 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i8 [[RS1]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
1472 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1474 vuint8m8_t
test_vandn_vx_u8m8_mu(vbool1_t mask
, vuint8m8_t maskedoff
, vuint8m8_t vs2
, uint8_t rs1
, size_t vl
) {
1475 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, rs1
, vl
);
1478 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vandn_vv_u16mf4_mu
1479 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1480 // CHECK-RV64-NEXT: entry:
1481 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
1482 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1484 vuint16mf4_t
test_vandn_vv_u16mf4_mu(vbool64_t mask
, vuint16mf4_t maskedoff
, vuint16mf4_t vs2
, vuint16mf4_t vs1
, size_t vl
) {
1485 return __riscv_vandn_mu(mask
, maskedoff
, vs2
, vs1
, vl
);
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vandn_vx_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i16 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vandn_vv_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vandn_vx_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i16 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vandn_vv_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vandn_vx_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i16 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vandn_vv_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vandn_vx_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i16 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vandn_vv_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vandn_vx_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i16 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vandn_vv_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vandn_vx_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 noundef zeroext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i16 [[RS1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vandn_vv_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vandn_vx_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

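// Note: per the RV64 calling convention, narrow scalars are extended to XLEN
// at call boundaries, which is why the i8/i16 `rs1` arguments are marked
// `zeroext` while the i32 `rs1` arguments (as in the test above) are marked
// `signext` in the CHECK-RV64-SAME lines.
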
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vandn_vv_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vandn_vx_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vandn_vv_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vandn_vx_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vandn_vv_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vandn_vx_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vandn_vv_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vandn_vx_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vandn_vv_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vandn_vx_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[RS1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

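// Note: the uint64_t `rs1` arguments in the u64 tests need no extension
// attribute on riscv64 since they are already XLEN-wide, so the
// CHECK-RV64-SAME lines show plain `i64 noundef`.
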
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vandn_vv_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vandn_vx_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[RS1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vandn_vv_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vandn_vx_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[RS1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vandn_vv_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vandn_vx_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[RS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl);
}