1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
4 // RUN: -target-feature +zvfh -disable-O0-optnone \
5 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
6 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
8 #include <riscv_vector.h>
10 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vslidedown_vx_f16mf4_tu
11 // CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
12 // CHECK-RV64-NEXT: entry:
13 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
14 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
16 vfloat16mf4_t
test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t maskedoff
, vfloat16mf4_t src
, size_t offset
, size_t vl
) {
17 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
20 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vslidedown_vx_f16mf2_tu
21 // CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
22 // CHECK-RV64-NEXT: entry:
23 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
24 // CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
26 vfloat16mf2_t
test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t maskedoff
, vfloat16mf2_t src
, size_t offset
, size_t vl
) {
27 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
30 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vslidedown_vx_f16m1_tu
31 // CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
32 // CHECK-RV64-NEXT: entry:
33 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
34 // CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
36 vfloat16m1_t
test_vslidedown_vx_f16m1_tu(vfloat16m1_t maskedoff
, vfloat16m1_t src
, size_t offset
, size_t vl
) {
37 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
40 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vslidedown_vx_f16m2_tu
41 // CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
42 // CHECK-RV64-NEXT: entry:
43 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
44 // CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
46 vfloat16m2_t
test_vslidedown_vx_f16m2_tu(vfloat16m2_t maskedoff
, vfloat16m2_t src
, size_t offset
, size_t vl
) {
47 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
50 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vslidedown_vx_f16m4_tu
51 // CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
52 // CHECK-RV64-NEXT: entry:
53 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
54 // CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
56 vfloat16m4_t
test_vslidedown_vx_f16m4_tu(vfloat16m4_t maskedoff
, vfloat16m4_t src
, size_t offset
, size_t vl
) {
57 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
60 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vslidedown_vx_f16m8_tu
61 // CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
62 // CHECK-RV64-NEXT: entry:
63 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
64 // CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
66 vfloat16m8_t
test_vslidedown_vx_f16m8_tu(vfloat16m8_t maskedoff
, vfloat16m8_t src
, size_t offset
, size_t vl
) {
67 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
70 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vslidedown_vx_f32mf2_tu
71 // CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
72 // CHECK-RV64-NEXT: entry:
73 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
74 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
76 vfloat32mf2_t
test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t maskedoff
, vfloat32mf2_t src
, size_t offset
, size_t vl
) {
77 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
80 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vslidedown_vx_f32m1_tu
81 // CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
82 // CHECK-RV64-NEXT: entry:
83 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
84 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
86 vfloat32m1_t
test_vslidedown_vx_f32m1_tu(vfloat32m1_t maskedoff
, vfloat32m1_t src
, size_t offset
, size_t vl
) {
87 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
90 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vslidedown_vx_f32m2_tu
91 // CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
92 // CHECK-RV64-NEXT: entry:
93 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
94 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
96 vfloat32m2_t
test_vslidedown_vx_f32m2_tu(vfloat32m2_t maskedoff
, vfloat32m2_t src
, size_t offset
, size_t vl
) {
97 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
100 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vslidedown_vx_f32m4_tu
101 // CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
102 // CHECK-RV64-NEXT: entry:
103 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
104 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
106 vfloat32m4_t
test_vslidedown_vx_f32m4_tu(vfloat32m4_t maskedoff
, vfloat32m4_t src
, size_t offset
, size_t vl
) {
107 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
110 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vslidedown_vx_f32m8_tu
111 // CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
112 // CHECK-RV64-NEXT: entry:
113 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
114 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
116 vfloat32m8_t
test_vslidedown_vx_f32m8_tu(vfloat32m8_t maskedoff
, vfloat32m8_t src
, size_t offset
, size_t vl
) {
117 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
120 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vslidedown_vx_f64m1_tu
121 // CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
122 // CHECK-RV64-NEXT: entry:
123 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
124 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
126 vfloat64m1_t
test_vslidedown_vx_f64m1_tu(vfloat64m1_t maskedoff
, vfloat64m1_t src
, size_t offset
, size_t vl
) {
127 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
130 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vslidedown_vx_f64m2_tu
131 // CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
132 // CHECK-RV64-NEXT: entry:
133 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
134 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
136 vfloat64m2_t
test_vslidedown_vx_f64m2_tu(vfloat64m2_t maskedoff
, vfloat64m2_t src
, size_t offset
, size_t vl
) {
137 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
140 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vslidedown_vx_f64m4_tu
141 // CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
142 // CHECK-RV64-NEXT: entry:
143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
144 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
146 vfloat64m4_t
test_vslidedown_vx_f64m4_tu(vfloat64m4_t maskedoff
, vfloat64m4_t src
, size_t offset
, size_t vl
) {
147 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
150 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vslidedown_vx_f64m8_tu
151 // CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
152 // CHECK-RV64-NEXT: entry:
153 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
154 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
156 vfloat64m8_t
test_vslidedown_vx_f64m8_tu(vfloat64m8_t maskedoff
, vfloat64m8_t src
, size_t offset
, size_t vl
) {
157 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
160 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslidedown_vx_i8mf8_tu
161 // CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
162 // CHECK-RV64-NEXT: entry:
163 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
164 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
166 vint8mf8_t
test_vslidedown_vx_i8mf8_tu(vint8mf8_t maskedoff
, vint8mf8_t src
, size_t offset
, size_t vl
) {
167 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
170 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslidedown_vx_i8mf4_tu
171 // CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
172 // CHECK-RV64-NEXT: entry:
173 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
174 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
176 vint8mf4_t
test_vslidedown_vx_i8mf4_tu(vint8mf4_t maskedoff
, vint8mf4_t src
, size_t offset
, size_t vl
) {
177 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
180 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslidedown_vx_i8mf2_tu
181 // CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
182 // CHECK-RV64-NEXT: entry:
183 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
184 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
186 vint8mf2_t
test_vslidedown_vx_i8mf2_tu(vint8mf2_t maskedoff
, vint8mf2_t src
, size_t offset
, size_t vl
) {
187 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
190 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslidedown_vx_i8m1_tu
191 // CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
192 // CHECK-RV64-NEXT: entry:
193 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
194 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
196 vint8m1_t
test_vslidedown_vx_i8m1_tu(vint8m1_t maskedoff
, vint8m1_t src
, size_t offset
, size_t vl
) {
197 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
200 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslidedown_vx_i8m2_tu
201 // CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
202 // CHECK-RV64-NEXT: entry:
203 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
204 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
206 vint8m2_t
test_vslidedown_vx_i8m2_tu(vint8m2_t maskedoff
, vint8m2_t src
, size_t offset
, size_t vl
) {
207 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
210 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslidedown_vx_i8m4_tu
211 // CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
212 // CHECK-RV64-NEXT: entry:
213 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
214 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
216 vint8m4_t
test_vslidedown_vx_i8m4_tu(vint8m4_t maskedoff
, vint8m4_t src
, size_t offset
, size_t vl
) {
217 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
220 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslidedown_vx_i8m8_tu
221 // CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
222 // CHECK-RV64-NEXT: entry:
223 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
224 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
226 vint8m8_t
test_vslidedown_vx_i8m8_tu(vint8m8_t maskedoff
, vint8m8_t src
, size_t offset
, size_t vl
) {
227 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
230 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslidedown_vx_i16mf4_tu
231 // CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
232 // CHECK-RV64-NEXT: entry:
233 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
234 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
236 vint16mf4_t
test_vslidedown_vx_i16mf4_tu(vint16mf4_t maskedoff
, vint16mf4_t src
, size_t offset
, size_t vl
) {
237 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
240 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslidedown_vx_i16mf2_tu
241 // CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
242 // CHECK-RV64-NEXT: entry:
243 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
244 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
246 vint16mf2_t
test_vslidedown_vx_i16mf2_tu(vint16mf2_t maskedoff
, vint16mf2_t src
, size_t offset
, size_t vl
) {
247 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
250 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslidedown_vx_i16m1_tu
251 // CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
252 // CHECK-RV64-NEXT: entry:
253 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
254 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
256 vint16m1_t
test_vslidedown_vx_i16m1_tu(vint16m1_t maskedoff
, vint16m1_t src
, size_t offset
, size_t vl
) {
257 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
260 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslidedown_vx_i16m2_tu
261 // CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
262 // CHECK-RV64-NEXT: entry:
263 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
264 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
266 vint16m2_t
test_vslidedown_vx_i16m2_tu(vint16m2_t maskedoff
, vint16m2_t src
, size_t offset
, size_t vl
) {
267 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
270 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslidedown_vx_i16m4_tu
271 // CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
272 // CHECK-RV64-NEXT: entry:
273 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
274 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
276 vint16m4_t
test_vslidedown_vx_i16m4_tu(vint16m4_t maskedoff
, vint16m4_t src
, size_t offset
, size_t vl
) {
277 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
280 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslidedown_vx_i16m8_tu
281 // CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
282 // CHECK-RV64-NEXT: entry:
283 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
284 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
286 vint16m8_t
test_vslidedown_vx_i16m8_tu(vint16m8_t maskedoff
, vint16m8_t src
, size_t offset
, size_t vl
) {
287 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
290 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslidedown_vx_i32mf2_tu
291 // CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
292 // CHECK-RV64-NEXT: entry:
293 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
294 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
296 vint32mf2_t
test_vslidedown_vx_i32mf2_tu(vint32mf2_t maskedoff
, vint32mf2_t src
, size_t offset
, size_t vl
) {
297 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
300 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslidedown_vx_i32m1_tu
301 // CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
302 // CHECK-RV64-NEXT: entry:
303 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
304 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
306 vint32m1_t
test_vslidedown_vx_i32m1_tu(vint32m1_t maskedoff
, vint32m1_t src
, size_t offset
, size_t vl
) {
307 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
310 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslidedown_vx_i32m2_tu
311 // CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
312 // CHECK-RV64-NEXT: entry:
313 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
314 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
316 vint32m2_t
test_vslidedown_vx_i32m2_tu(vint32m2_t maskedoff
, vint32m2_t src
, size_t offset
, size_t vl
) {
317 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
320 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslidedown_vx_i32m4_tu
321 // CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
322 // CHECK-RV64-NEXT: entry:
323 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
324 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
326 vint32m4_t
test_vslidedown_vx_i32m4_tu(vint32m4_t maskedoff
, vint32m4_t src
, size_t offset
, size_t vl
) {
327 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
330 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslidedown_vx_i32m8_tu
331 // CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
332 // CHECK-RV64-NEXT: entry:
333 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
334 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
336 vint32m8_t
test_vslidedown_vx_i32m8_tu(vint32m8_t maskedoff
, vint32m8_t src
, size_t offset
, size_t vl
) {
337 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
340 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslidedown_vx_i64m1_tu
341 // CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
342 // CHECK-RV64-NEXT: entry:
343 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
344 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
346 vint64m1_t
test_vslidedown_vx_i64m1_tu(vint64m1_t maskedoff
, vint64m1_t src
, size_t offset
, size_t vl
) {
347 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
350 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslidedown_vx_i64m2_tu
351 // CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
352 // CHECK-RV64-NEXT: entry:
353 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
354 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
356 vint64m2_t
test_vslidedown_vx_i64m2_tu(vint64m2_t maskedoff
, vint64m2_t src
, size_t offset
, size_t vl
) {
357 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
360 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslidedown_vx_i64m4_tu
361 // CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
362 // CHECK-RV64-NEXT: entry:
363 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
364 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
366 vint64m4_t
test_vslidedown_vx_i64m4_tu(vint64m4_t maskedoff
, vint64m4_t src
, size_t offset
, size_t vl
) {
367 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
370 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslidedown_vx_i64m8_tu
371 // CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
372 // CHECK-RV64-NEXT: entry:
373 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
374 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
376 vint64m8_t
test_vslidedown_vx_i64m8_tu(vint64m8_t maskedoff
, vint64m8_t src
, size_t offset
, size_t vl
) {
377 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
380 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslidedown_vx_u8mf8_tu
381 // CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
382 // CHECK-RV64-NEXT: entry:
383 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
384 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
386 vuint8mf8_t
test_vslidedown_vx_u8mf8_tu(vuint8mf8_t maskedoff
, vuint8mf8_t src
, size_t offset
, size_t vl
) {
387 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
390 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslidedown_vx_u8mf4_tu
391 // CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
392 // CHECK-RV64-NEXT: entry:
393 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
394 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
396 vuint8mf4_t
test_vslidedown_vx_u8mf4_tu(vuint8mf4_t maskedoff
, vuint8mf4_t src
, size_t offset
, size_t vl
) {
397 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
400 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslidedown_vx_u8mf2_tu
401 // CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
402 // CHECK-RV64-NEXT: entry:
403 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
404 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
406 vuint8mf2_t
test_vslidedown_vx_u8mf2_tu(vuint8mf2_t maskedoff
, vuint8mf2_t src
, size_t offset
, size_t vl
) {
407 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
410 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslidedown_vx_u8m1_tu
411 // CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
412 // CHECK-RV64-NEXT: entry:
413 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
414 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
416 vuint8m1_t
test_vslidedown_vx_u8m1_tu(vuint8m1_t maskedoff
, vuint8m1_t src
, size_t offset
, size_t vl
) {
417 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
420 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslidedown_vx_u8m2_tu
421 // CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
422 // CHECK-RV64-NEXT: entry:
423 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
424 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
426 vuint8m2_t
test_vslidedown_vx_u8m2_tu(vuint8m2_t maskedoff
, vuint8m2_t src
, size_t offset
, size_t vl
) {
427 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
430 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslidedown_vx_u8m4_tu
431 // CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
432 // CHECK-RV64-NEXT: entry:
433 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
434 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
436 vuint8m4_t
test_vslidedown_vx_u8m4_tu(vuint8m4_t maskedoff
, vuint8m4_t src
, size_t offset
, size_t vl
) {
437 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
440 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslidedown_vx_u8m8_tu
441 // CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
442 // CHECK-RV64-NEXT: entry:
443 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
444 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
446 vuint8m8_t
test_vslidedown_vx_u8m8_tu(vuint8m8_t maskedoff
, vuint8m8_t src
, size_t offset
, size_t vl
) {
447 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
450 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslidedown_vx_u16mf4_tu
451 // CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
452 // CHECK-RV64-NEXT: entry:
453 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
454 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
456 vuint16mf4_t
test_vslidedown_vx_u16mf4_tu(vuint16mf4_t maskedoff
, vuint16mf4_t src
, size_t offset
, size_t vl
) {
457 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
460 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslidedown_vx_u16mf2_tu
461 // CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
462 // CHECK-RV64-NEXT: entry:
463 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
464 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
466 vuint16mf2_t
test_vslidedown_vx_u16mf2_tu(vuint16mf2_t maskedoff
, vuint16mf2_t src
, size_t offset
, size_t vl
) {
467 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
470 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslidedown_vx_u16m1_tu
471 // CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
472 // CHECK-RV64-NEXT: entry:
473 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
474 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
476 vuint16m1_t
test_vslidedown_vx_u16m1_tu(vuint16m1_t maskedoff
, vuint16m1_t src
, size_t offset
, size_t vl
) {
477 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
480 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslidedown_vx_u16m2_tu
481 // CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
482 // CHECK-RV64-NEXT: entry:
483 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
484 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
486 vuint16m2_t
test_vslidedown_vx_u16m2_tu(vuint16m2_t maskedoff
, vuint16m2_t src
, size_t offset
, size_t vl
) {
487 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
490 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslidedown_vx_u16m4_tu
491 // CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
492 // CHECK-RV64-NEXT: entry:
493 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
494 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
496 vuint16m4_t
test_vslidedown_vx_u16m4_tu(vuint16m4_t maskedoff
, vuint16m4_t src
, size_t offset
, size_t vl
) {
497 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
500 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslidedown_vx_u16m8_tu
501 // CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
502 // CHECK-RV64-NEXT: entry:
503 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
504 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
506 vuint16m8_t
test_vslidedown_vx_u16m8_tu(vuint16m8_t maskedoff
, vuint16m8_t src
, size_t offset
, size_t vl
) {
507 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
510 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslidedown_vx_u32mf2_tu
511 // CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
512 // CHECK-RV64-NEXT: entry:
513 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
514 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
516 vuint32mf2_t
test_vslidedown_vx_u32mf2_tu(vuint32mf2_t maskedoff
, vuint32mf2_t src
, size_t offset
, size_t vl
) {
517 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
520 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslidedown_vx_u32m1_tu
521 // CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
522 // CHECK-RV64-NEXT: entry:
523 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
524 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
526 vuint32m1_t
test_vslidedown_vx_u32m1_tu(vuint32m1_t maskedoff
, vuint32m1_t src
, size_t offset
, size_t vl
) {
527 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
530 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslidedown_vx_u32m2_tu
531 // CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
532 // CHECK-RV64-NEXT: entry:
533 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
534 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
536 vuint32m2_t
test_vslidedown_vx_u32m2_tu(vuint32m2_t maskedoff
, vuint32m2_t src
, size_t offset
, size_t vl
) {
537 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
540 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslidedown_vx_u32m4_tu
541 // CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
542 // CHECK-RV64-NEXT: entry:
543 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
544 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
546 vuint32m4_t
test_vslidedown_vx_u32m4_tu(vuint32m4_t maskedoff
, vuint32m4_t src
, size_t offset
, size_t vl
) {
547 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
550 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslidedown_vx_u32m8_tu
551 // CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
552 // CHECK-RV64-NEXT: entry:
553 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
554 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
556 vuint32m8_t
test_vslidedown_vx_u32m8_tu(vuint32m8_t maskedoff
, vuint32m8_t src
, size_t offset
, size_t vl
) {
557 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
560 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslidedown_vx_u64m1_tu
561 // CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
562 // CHECK-RV64-NEXT: entry:
563 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
564 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
566 vuint64m1_t
test_vslidedown_vx_u64m1_tu(vuint64m1_t maskedoff
, vuint64m1_t src
, size_t offset
, size_t vl
) {
567 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
570 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslidedown_vx_u64m2_tu
571 // CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
572 // CHECK-RV64-NEXT: entry:
573 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
574 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
576 vuint64m2_t
test_vslidedown_vx_u64m2_tu(vuint64m2_t maskedoff
, vuint64m2_t src
, size_t offset
, size_t vl
) {
577 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
580 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslidedown_vx_u64m4_tu
581 // CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
582 // CHECK-RV64-NEXT: entry:
583 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
584 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
586 vuint64m4_t
test_vslidedown_vx_u64m4_tu(vuint64m4_t maskedoff
, vuint64m4_t src
, size_t offset
, size_t vl
) {
587 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
590 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslidedown_vx_u64m8_tu
591 // CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
592 // CHECK-RV64-NEXT: entry:
593 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 2)
594 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
596 vuint64m8_t
test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff
, vuint64m8_t src
, size_t offset
, size_t vl
) {
597 return __riscv_vslidedown_tu(maskedoff
, src
, offset
, vl
);
600 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vslidedown_vx_f16mf4_tum
601 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
602 // CHECK-RV64-NEXT: entry:
603 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
604 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
606 vfloat16mf4_t
test_vslidedown_vx_f16mf4_tum(vbool64_t mask
, vfloat16mf4_t maskedoff
, vfloat16mf4_t src
, size_t offset
, size_t vl
) {
607 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
610 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vslidedown_vx_f16mf2_tum
611 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
612 // CHECK-RV64-NEXT: entry:
613 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
614 // CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
616 vfloat16mf2_t
test_vslidedown_vx_f16mf2_tum(vbool32_t mask
, vfloat16mf2_t maskedoff
, vfloat16mf2_t src
, size_t offset
, size_t vl
) {
617 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
620 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vslidedown_vx_f16m1_tum
621 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
622 // CHECK-RV64-NEXT: entry:
623 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
624 // CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
626 vfloat16m1_t
test_vslidedown_vx_f16m1_tum(vbool16_t mask
, vfloat16m1_t maskedoff
, vfloat16m1_t src
, size_t offset
, size_t vl
) {
627 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
630 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vslidedown_vx_f16m2_tum
631 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
632 // CHECK-RV64-NEXT: entry:
633 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
634 // CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
636 vfloat16m2_t
test_vslidedown_vx_f16m2_tum(vbool8_t mask
, vfloat16m2_t maskedoff
, vfloat16m2_t src
, size_t offset
, size_t vl
) {
637 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
640 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vslidedown_vx_f16m4_tum
641 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
642 // CHECK-RV64-NEXT: entry:
643 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
644 // CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
646 vfloat16m4_t
test_vslidedown_vx_f16m4_tum(vbool4_t mask
, vfloat16m4_t maskedoff
, vfloat16m4_t src
, size_t offset
, size_t vl
) {
647 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
650 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vslidedown_vx_f16m8_tum
651 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
652 // CHECK-RV64-NEXT: entry:
653 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
654 // CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
656 vfloat16m8_t
test_vslidedown_vx_f16m8_tum(vbool2_t mask
, vfloat16m8_t maskedoff
, vfloat16m8_t src
, size_t offset
, size_t vl
) {
657 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
660 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vslidedown_vx_f32mf2_tum
661 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
662 // CHECK-RV64-NEXT: entry:
663 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
664 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
666 vfloat32mf2_t
test_vslidedown_vx_f32mf2_tum(vbool64_t mask
, vfloat32mf2_t maskedoff
, vfloat32mf2_t src
, size_t offset
, size_t vl
) {
667 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
670 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vslidedown_vx_f32m1_tum
671 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
672 // CHECK-RV64-NEXT: entry:
673 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
674 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
676 vfloat32m1_t
test_vslidedown_vx_f32m1_tum(vbool32_t mask
, vfloat32m1_t maskedoff
, vfloat32m1_t src
, size_t offset
, size_t vl
) {
677 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
680 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vslidedown_vx_f32m2_tum
681 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
682 // CHECK-RV64-NEXT: entry:
683 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
684 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
686 vfloat32m2_t
test_vslidedown_vx_f32m2_tum(vbool16_t mask
, vfloat32m2_t maskedoff
, vfloat32m2_t src
, size_t offset
, size_t vl
) {
687 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
690 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vslidedown_vx_f32m4_tum
691 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
692 // CHECK-RV64-NEXT: entry:
693 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
694 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
696 vfloat32m4_t
test_vslidedown_vx_f32m4_tum(vbool8_t mask
, vfloat32m4_t maskedoff
, vfloat32m4_t src
, size_t offset
, size_t vl
) {
697 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
700 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vslidedown_vx_f32m8_tum
701 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
702 // CHECK-RV64-NEXT: entry:
703 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
704 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
706 vfloat32m8_t
test_vslidedown_vx_f32m8_tum(vbool4_t mask
, vfloat32m8_t maskedoff
, vfloat32m8_t src
, size_t offset
, size_t vl
) {
707 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
710 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vslidedown_vx_f64m1_tum
711 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
712 // CHECK-RV64-NEXT: entry:
713 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
714 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
716 vfloat64m1_t
test_vslidedown_vx_f64m1_tum(vbool64_t mask
, vfloat64m1_t maskedoff
, vfloat64m1_t src
, size_t offset
, size_t vl
) {
717 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
720 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vslidedown_vx_f64m2_tum
721 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
722 // CHECK-RV64-NEXT: entry:
723 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
724 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
726 vfloat64m2_t
test_vslidedown_vx_f64m2_tum(vbool32_t mask
, vfloat64m2_t maskedoff
, vfloat64m2_t src
, size_t offset
, size_t vl
) {
727 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
730 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vslidedown_vx_f64m4_tum
731 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
732 // CHECK-RV64-NEXT: entry:
733 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
734 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
736 vfloat64m4_t
test_vslidedown_vx_f64m4_tum(vbool16_t mask
, vfloat64m4_t maskedoff
, vfloat64m4_t src
, size_t offset
, size_t vl
) {
737 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
740 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vslidedown_vx_f64m8_tum
741 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
742 // CHECK-RV64-NEXT: entry:
743 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
744 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
746 vfloat64m8_t
test_vslidedown_vx_f64m8_tum(vbool8_t mask
, vfloat64m8_t maskedoff
, vfloat64m8_t src
, size_t offset
, size_t vl
) {
747 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
750 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslidedown_vx_i8mf8_tum
751 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
752 // CHECK-RV64-NEXT: entry:
753 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
754 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
756 vint8mf8_t
test_vslidedown_vx_i8mf8_tum(vbool64_t mask
, vint8mf8_t maskedoff
, vint8mf8_t src
, size_t offset
, size_t vl
) {
757 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
760 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslidedown_vx_i8mf4_tum
761 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
762 // CHECK-RV64-NEXT: entry:
763 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
764 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
766 vint8mf4_t
test_vslidedown_vx_i8mf4_tum(vbool32_t mask
, vint8mf4_t maskedoff
, vint8mf4_t src
, size_t offset
, size_t vl
) {
767 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
770 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslidedown_vx_i8mf2_tum
771 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
772 // CHECK-RV64-NEXT: entry:
773 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
774 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
776 vint8mf2_t
test_vslidedown_vx_i8mf2_tum(vbool16_t mask
, vint8mf2_t maskedoff
, vint8mf2_t src
, size_t offset
, size_t vl
) {
777 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
780 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslidedown_vx_i8m1_tum
781 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
782 // CHECK-RV64-NEXT: entry:
783 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
784 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
786 vint8m1_t
test_vslidedown_vx_i8m1_tum(vbool8_t mask
, vint8m1_t maskedoff
, vint8m1_t src
, size_t offset
, size_t vl
) {
787 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
790 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslidedown_vx_i8m2_tum
791 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
792 // CHECK-RV64-NEXT: entry:
793 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
794 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
796 vint8m2_t
test_vslidedown_vx_i8m2_tum(vbool4_t mask
, vint8m2_t maskedoff
, vint8m2_t src
, size_t offset
, size_t vl
) {
797 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
800 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslidedown_vx_i8m4_tum
801 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
802 // CHECK-RV64-NEXT: entry:
803 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
804 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
806 vint8m4_t
test_vslidedown_vx_i8m4_tum(vbool2_t mask
, vint8m4_t maskedoff
, vint8m4_t src
, size_t offset
, size_t vl
) {
807 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
810 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslidedown_vx_i8m8_tum
811 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
812 // CHECK-RV64-NEXT: entry:
813 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
814 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
816 vint8m8_t
test_vslidedown_vx_i8m8_tum(vbool1_t mask
, vint8m8_t maskedoff
, vint8m8_t src
, size_t offset
, size_t vl
) {
817 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
820 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslidedown_vx_i16mf4_tum
821 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
822 // CHECK-RV64-NEXT: entry:
823 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
824 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
826 vint16mf4_t
test_vslidedown_vx_i16mf4_tum(vbool64_t mask
, vint16mf4_t maskedoff
, vint16mf4_t src
, size_t offset
, size_t vl
) {
827 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
830 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslidedown_vx_i16mf2_tum
831 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
832 // CHECK-RV64-NEXT: entry:
833 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
834 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
836 vint16mf2_t
test_vslidedown_vx_i16mf2_tum(vbool32_t mask
, vint16mf2_t maskedoff
, vint16mf2_t src
, size_t offset
, size_t vl
) {
837 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
840 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslidedown_vx_i16m1_tum
841 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
842 // CHECK-RV64-NEXT: entry:
843 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
844 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
846 vint16m1_t
test_vslidedown_vx_i16m1_tum(vbool16_t mask
, vint16m1_t maskedoff
, vint16m1_t src
, size_t offset
, size_t vl
) {
847 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
850 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslidedown_vx_i16m2_tum
851 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
852 // CHECK-RV64-NEXT: entry:
853 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
854 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
856 vint16m2_t
test_vslidedown_vx_i16m2_tum(vbool8_t mask
, vint16m2_t maskedoff
, vint16m2_t src
, size_t offset
, size_t vl
) {
857 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
860 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslidedown_vx_i16m4_tum
861 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
862 // CHECK-RV64-NEXT: entry:
863 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
864 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
866 vint16m4_t
test_vslidedown_vx_i16m4_tum(vbool4_t mask
, vint16m4_t maskedoff
, vint16m4_t src
, size_t offset
, size_t vl
) {
867 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
870 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslidedown_vx_i16m8_tum
871 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
872 // CHECK-RV64-NEXT: entry:
873 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
874 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
876 vint16m8_t
test_vslidedown_vx_i16m8_tum(vbool2_t mask
, vint16m8_t maskedoff
, vint16m8_t src
, size_t offset
, size_t vl
) {
877 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
880 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslidedown_vx_i32mf2_tum
881 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
882 // CHECK-RV64-NEXT: entry:
883 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
884 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
886 vint32mf2_t
test_vslidedown_vx_i32mf2_tum(vbool64_t mask
, vint32mf2_t maskedoff
, vint32mf2_t src
, size_t offset
, size_t vl
) {
887 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
890 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslidedown_vx_i32m1_tum
891 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
892 // CHECK-RV64-NEXT: entry:
893 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
894 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
896 vint32m1_t
test_vslidedown_vx_i32m1_tum(vbool32_t mask
, vint32m1_t maskedoff
, vint32m1_t src
, size_t offset
, size_t vl
) {
897 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
900 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslidedown_vx_i32m2_tum
901 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
902 // CHECK-RV64-NEXT: entry:
903 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
904 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
906 vint32m2_t
test_vslidedown_vx_i32m2_tum(vbool16_t mask
, vint32m2_t maskedoff
, vint32m2_t src
, size_t offset
, size_t vl
) {
907 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
910 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslidedown_vx_i32m4_tum
911 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
912 // CHECK-RV64-NEXT: entry:
913 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
914 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
916 vint32m4_t
test_vslidedown_vx_i32m4_tum(vbool8_t mask
, vint32m4_t maskedoff
, vint32m4_t src
, size_t offset
, size_t vl
) {
917 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
920 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslidedown_vx_i32m8_tum
921 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
922 // CHECK-RV64-NEXT: entry:
923 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
924 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
926 vint32m8_t
test_vslidedown_vx_i32m8_tum(vbool4_t mask
, vint32m8_t maskedoff
, vint32m8_t src
, size_t offset
, size_t vl
) {
927 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
930 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslidedown_vx_i64m1_tum
931 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
932 // CHECK-RV64-NEXT: entry:
933 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
934 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
936 vint64m1_t
test_vslidedown_vx_i64m1_tum(vbool64_t mask
, vint64m1_t maskedoff
, vint64m1_t src
, size_t offset
, size_t vl
) {
937 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
940 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslidedown_vx_i64m2_tum
941 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
942 // CHECK-RV64-NEXT: entry:
943 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
944 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
946 vint64m2_t
test_vslidedown_vx_i64m2_tum(vbool32_t mask
, vint64m2_t maskedoff
, vint64m2_t src
, size_t offset
, size_t vl
) {
947 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
950 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslidedown_vx_i64m4_tum
951 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
952 // CHECK-RV64-NEXT: entry:
953 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
954 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
956 vint64m4_t
test_vslidedown_vx_i64m4_tum(vbool16_t mask
, vint64m4_t maskedoff
, vint64m4_t src
, size_t offset
, size_t vl
) {
957 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
960 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslidedown_vx_i64m8_tum
961 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
962 // CHECK-RV64-NEXT: entry:
963 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
964 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
966 vint64m8_t
test_vslidedown_vx_i64m8_tum(vbool8_t mask
, vint64m8_t maskedoff
, vint64m8_t src
, size_t offset
, size_t vl
) {
967 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
970 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslidedown_vx_u8mf8_tum
971 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
972 // CHECK-RV64-NEXT: entry:
973 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
974 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
976 vuint8mf8_t
test_vslidedown_vx_u8mf8_tum(vbool64_t mask
, vuint8mf8_t maskedoff
, vuint8mf8_t src
, size_t offset
, size_t vl
) {
977 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
980 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslidedown_vx_u8mf4_tum
981 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
982 // CHECK-RV64-NEXT: entry:
983 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
984 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
986 vuint8mf4_t
test_vslidedown_vx_u8mf4_tum(vbool32_t mask
, vuint8mf4_t maskedoff
, vuint8mf4_t src
, size_t offset
, size_t vl
) {
987 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
990 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslidedown_vx_u8mf2_tum
991 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
992 // CHECK-RV64-NEXT: entry:
993 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
994 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
996 vuint8mf2_t
test_vslidedown_vx_u8mf2_tum(vbool16_t mask
, vuint8mf2_t maskedoff
, vuint8mf2_t src
, size_t offset
, size_t vl
) {
997 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1000 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslidedown_vx_u8m1_tum
1001 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1002 // CHECK-RV64-NEXT: entry:
1003 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
1004 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1006 vuint8m1_t
test_vslidedown_vx_u8m1_tum(vbool8_t mask
, vuint8m1_t maskedoff
, vuint8m1_t src
, size_t offset
, size_t vl
) {
1007 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1010 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslidedown_vx_u8m2_tum
1011 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1012 // CHECK-RV64-NEXT: entry:
1013 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
1014 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1016 vuint8m2_t
test_vslidedown_vx_u8m2_tum(vbool4_t mask
, vuint8m2_t maskedoff
, vuint8m2_t src
, size_t offset
, size_t vl
) {
1017 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1020 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslidedown_vx_u8m4_tum
1021 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1022 // CHECK-RV64-NEXT: entry:
1023 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
1024 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1026 vuint8m4_t
test_vslidedown_vx_u8m4_tum(vbool2_t mask
, vuint8m4_t maskedoff
, vuint8m4_t src
, size_t offset
, size_t vl
) {
1027 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1030 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslidedown_vx_u8m8_tum
1031 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1032 // CHECK-RV64-NEXT: entry:
1033 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
1034 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1036 vuint8m8_t
test_vslidedown_vx_u8m8_tum(vbool1_t mask
, vuint8m8_t maskedoff
, vuint8m8_t src
, size_t offset
, size_t vl
) {
1037 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1040 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslidedown_vx_u16mf4_tum
1041 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1042 // CHECK-RV64-NEXT: entry:
1043 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
1044 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1046 vuint16mf4_t
test_vslidedown_vx_u16mf4_tum(vbool64_t mask
, vuint16mf4_t maskedoff
, vuint16mf4_t src
, size_t offset
, size_t vl
) {
1047 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1050 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslidedown_vx_u16mf2_tum
1051 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1052 // CHECK-RV64-NEXT: entry:
1053 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
1054 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1056 vuint16mf2_t
test_vslidedown_vx_u16mf2_tum(vbool32_t mask
, vuint16mf2_t maskedoff
, vuint16mf2_t src
, size_t offset
, size_t vl
) {
1057 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1060 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslidedown_vx_u16m1_tum
1061 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1062 // CHECK-RV64-NEXT: entry:
1063 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
1064 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1066 vuint16m1_t
test_vslidedown_vx_u16m1_tum(vbool16_t mask
, vuint16m1_t maskedoff
, vuint16m1_t src
, size_t offset
, size_t vl
) {
1067 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1070 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslidedown_vx_u16m2_tum
1071 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1072 // CHECK-RV64-NEXT: entry:
1073 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
1074 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1076 vuint16m2_t
test_vslidedown_vx_u16m2_tum(vbool8_t mask
, vuint16m2_t maskedoff
, vuint16m2_t src
, size_t offset
, size_t vl
) {
1077 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1080 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslidedown_vx_u16m4_tum
1081 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1082 // CHECK-RV64-NEXT: entry:
1083 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
1084 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1086 vuint16m4_t
test_vslidedown_vx_u16m4_tum(vbool4_t mask
, vuint16m4_t maskedoff
, vuint16m4_t src
, size_t offset
, size_t vl
) {
1087 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1090 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslidedown_vx_u16m8_tum
1091 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1092 // CHECK-RV64-NEXT: entry:
1093 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
1094 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1096 vuint16m8_t
test_vslidedown_vx_u16m8_tum(vbool2_t mask
, vuint16m8_t maskedoff
, vuint16m8_t src
, size_t offset
, size_t vl
) {
1097 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1100 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslidedown_vx_u32mf2_tum
1101 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1102 // CHECK-RV64-NEXT: entry:
1103 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
1104 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1106 vuint32mf2_t
test_vslidedown_vx_u32mf2_tum(vbool64_t mask
, vuint32mf2_t maskedoff
, vuint32mf2_t src
, size_t offset
, size_t vl
) {
1107 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1110 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslidedown_vx_u32m1_tum
1111 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1112 // CHECK-RV64-NEXT: entry:
1113 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
1114 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1116 vuint32m1_t
test_vslidedown_vx_u32m1_tum(vbool32_t mask
, vuint32m1_t maskedoff
, vuint32m1_t src
, size_t offset
, size_t vl
) {
1117 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1120 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslidedown_vx_u32m2_tum
1121 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1122 // CHECK-RV64-NEXT: entry:
1123 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
1124 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1126 vuint32m2_t
test_vslidedown_vx_u32m2_tum(vbool16_t mask
, vuint32m2_t maskedoff
, vuint32m2_t src
, size_t offset
, size_t vl
) {
1127 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1130 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslidedown_vx_u32m4_tum
1131 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1132 // CHECK-RV64-NEXT: entry:
1133 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
1134 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1136 vuint32m4_t
test_vslidedown_vx_u32m4_tum(vbool8_t mask
, vuint32m4_t maskedoff
, vuint32m4_t src
, size_t offset
, size_t vl
) {
1137 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1140 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslidedown_vx_u32m8_tum
1141 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1142 // CHECK-RV64-NEXT: entry:
1143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
1144 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1146 vuint32m8_t
test_vslidedown_vx_u32m8_tum(vbool4_t mask
, vuint32m8_t maskedoff
, vuint32m8_t src
, size_t offset
, size_t vl
) {
1147 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1150 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslidedown_vx_u64m1_tum
1151 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1152 // CHECK-RV64-NEXT: entry:
1153 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
1154 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1156 vuint64m1_t
test_vslidedown_vx_u64m1_tum(vbool64_t mask
, vuint64m1_t maskedoff
, vuint64m1_t src
, size_t offset
, size_t vl
) {
1157 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1160 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslidedown_vx_u64m2_tum
1161 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1162 // CHECK-RV64-NEXT: entry:
1163 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
1164 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1166 vuint64m2_t
test_vslidedown_vx_u64m2_tum(vbool32_t mask
, vuint64m2_t maskedoff
, vuint64m2_t src
, size_t offset
, size_t vl
) {
1167 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1170 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslidedown_vx_u64m4_tum
1171 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1172 // CHECK-RV64-NEXT: entry:
1173 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
1174 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1176 vuint64m4_t
test_vslidedown_vx_u64m4_tum(vbool16_t mask
, vuint64m4_t maskedoff
, vuint64m4_t src
, size_t offset
, size_t vl
) {
1177 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1180 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslidedown_vx_u64m8_tum
1181 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1182 // CHECK-RV64-NEXT: entry:
1183 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
1184 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1186 vuint64m8_t
test_vslidedown_vx_u64m8_tum(vbool8_t mask
, vuint64m8_t maskedoff
, vuint64m8_t src
, size_t offset
, size_t vl
) {
1187 return __riscv_vslidedown_tum(mask
, maskedoff
, src
, offset
, vl
);
1190 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vslidedown_vx_f16mf4_tumu
1191 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1192 // CHECK-RV64-NEXT: entry:
1193 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1194 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
1196 vfloat16mf4_t
test_vslidedown_vx_f16mf4_tumu(vbool64_t mask
, vfloat16mf4_t maskedoff
, vfloat16mf4_t src
, size_t offset
, size_t vl
) {
1197 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1200 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vslidedown_vx_f16mf2_tumu
1201 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1202 // CHECK-RV64-NEXT: entry:
1203 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1204 // CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
1206 vfloat16mf2_t
test_vslidedown_vx_f16mf2_tumu(vbool32_t mask
, vfloat16mf2_t maskedoff
, vfloat16mf2_t src
, size_t offset
, size_t vl
) {
1207 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1210 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vslidedown_vx_f16m1_tumu
1211 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1212 // CHECK-RV64-NEXT: entry:
1213 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1214 // CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
1216 vfloat16m1_t
test_vslidedown_vx_f16m1_tumu(vbool16_t mask
, vfloat16m1_t maskedoff
, vfloat16m1_t src
, size_t offset
, size_t vl
) {
1217 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1220 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vslidedown_vx_f16m2_tumu
1221 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1222 // CHECK-RV64-NEXT: entry:
1223 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1224 // CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
1226 vfloat16m2_t
test_vslidedown_vx_f16m2_tumu(vbool8_t mask
, vfloat16m2_t maskedoff
, vfloat16m2_t src
, size_t offset
, size_t vl
) {
1227 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1230 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vslidedown_vx_f16m4_tumu
1231 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1232 // CHECK-RV64-NEXT: entry:
1233 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
1234 // CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
1236 vfloat16m4_t
test_vslidedown_vx_f16m4_tumu(vbool4_t mask
, vfloat16m4_t maskedoff
, vfloat16m4_t src
, size_t offset
, size_t vl
) {
1237 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1240 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vslidedown_vx_f16m8_tumu
1241 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1242 // CHECK-RV64-NEXT: entry:
1243 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
1244 // CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
1246 vfloat16m8_t
test_vslidedown_vx_f16m8_tumu(vbool2_t mask
, vfloat16m8_t maskedoff
, vfloat16m8_t src
, size_t offset
, size_t vl
) {
1247 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1250 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vslidedown_vx_f32mf2_tumu
1251 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1252 // CHECK-RV64-NEXT: entry:
1253 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1254 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
1256 vfloat32mf2_t
test_vslidedown_vx_f32mf2_tumu(vbool64_t mask
, vfloat32mf2_t maskedoff
, vfloat32mf2_t src
, size_t offset
, size_t vl
) {
1257 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1260 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vslidedown_vx_f32m1_tumu
1261 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1262 // CHECK-RV64-NEXT: entry:
1263 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1264 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
1266 vfloat32m1_t
test_vslidedown_vx_f32m1_tumu(vbool32_t mask
, vfloat32m1_t maskedoff
, vfloat32m1_t src
, size_t offset
, size_t vl
) {
1267 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1270 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vslidedown_vx_f32m2_tumu
1271 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1272 // CHECK-RV64-NEXT: entry:
1273 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1274 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
1276 vfloat32m2_t
test_vslidedown_vx_f32m2_tumu(vbool16_t mask
, vfloat32m2_t maskedoff
, vfloat32m2_t src
, size_t offset
, size_t vl
) {
1277 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1280 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vslidedown_vx_f32m4_tumu
1281 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1282 // CHECK-RV64-NEXT: entry:
1283 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1284 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
1286 vfloat32m4_t
test_vslidedown_vx_f32m4_tumu(vbool8_t mask
, vfloat32m4_t maskedoff
, vfloat32m4_t src
, size_t offset
, size_t vl
) {
1287 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1290 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vslidedown_vx_f32m8_tumu
1291 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1292 // CHECK-RV64-NEXT: entry:
1293 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
1294 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
1296 vfloat32m8_t
test_vslidedown_vx_f32m8_tumu(vbool4_t mask
, vfloat32m8_t maskedoff
, vfloat32m8_t src
, size_t offset
, size_t vl
) {
1297 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1300 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vslidedown_vx_f64m1_tumu
1301 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1302 // CHECK-RV64-NEXT: entry:
1303 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1304 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
1306 vfloat64m1_t
test_vslidedown_vx_f64m1_tumu(vbool64_t mask
, vfloat64m1_t maskedoff
, vfloat64m1_t src
, size_t offset
, size_t vl
) {
1307 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1310 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vslidedown_vx_f64m2_tumu
1311 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1312 // CHECK-RV64-NEXT: entry:
1313 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1314 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
1316 vfloat64m2_t
test_vslidedown_vx_f64m2_tumu(vbool32_t mask
, vfloat64m2_t maskedoff
, vfloat64m2_t src
, size_t offset
, size_t vl
) {
1317 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1320 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vslidedown_vx_f64m4_tumu
1321 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1322 // CHECK-RV64-NEXT: entry:
1323 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1324 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
1326 vfloat64m4_t
test_vslidedown_vx_f64m4_tumu(vbool16_t mask
, vfloat64m4_t maskedoff
, vfloat64m4_t src
, size_t offset
, size_t vl
) {
1327 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1330 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vslidedown_vx_f64m8_tumu
1331 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1332 // CHECK-RV64-NEXT: entry:
1333 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1334 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
1336 vfloat64m8_t
test_vslidedown_vx_f64m8_tumu(vbool8_t mask
, vfloat64m8_t maskedoff
, vfloat64m8_t src
, size_t offset
, size_t vl
) {
1337 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1340 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslidedown_vx_i8mf8_tumu
1341 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1342 // CHECK-RV64-NEXT: entry:
1343 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1344 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
1346 vint8mf8_t
test_vslidedown_vx_i8mf8_tumu(vbool64_t mask
, vint8mf8_t maskedoff
, vint8mf8_t src
, size_t offset
, size_t vl
) {
1347 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1350 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslidedown_vx_i8mf4_tumu
1351 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1352 // CHECK-RV64-NEXT: entry:
1353 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1354 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
1356 vint8mf4_t
test_vslidedown_vx_i8mf4_tumu(vbool32_t mask
, vint8mf4_t maskedoff
, vint8mf4_t src
, size_t offset
, size_t vl
) {
1357 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1360 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslidedown_vx_i8mf2_tumu
1361 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1362 // CHECK-RV64-NEXT: entry:
1363 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1364 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
1366 vint8mf2_t
test_vslidedown_vx_i8mf2_tumu(vbool16_t mask
, vint8mf2_t maskedoff
, vint8mf2_t src
, size_t offset
, size_t vl
) {
1367 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1370 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslidedown_vx_i8m1_tumu
1371 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1372 // CHECK-RV64-NEXT: entry:
1373 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1374 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1376 vint8m1_t
test_vslidedown_vx_i8m1_tumu(vbool8_t mask
, vint8m1_t maskedoff
, vint8m1_t src
, size_t offset
, size_t vl
) {
1377 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1380 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslidedown_vx_i8m2_tumu
1381 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1382 // CHECK-RV64-NEXT: entry:
1383 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
1384 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1386 vint8m2_t
test_vslidedown_vx_i8m2_tumu(vbool4_t mask
, vint8m2_t maskedoff
, vint8m2_t src
, size_t offset
, size_t vl
) {
1387 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1390 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslidedown_vx_i8m4_tumu
1391 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1392 // CHECK-RV64-NEXT: entry:
1393 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
1394 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1396 vint8m4_t
test_vslidedown_vx_i8m4_tumu(vbool2_t mask
, vint8m4_t maskedoff
, vint8m4_t src
, size_t offset
, size_t vl
) {
1397 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1400 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslidedown_vx_i8m8_tumu
1401 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1402 // CHECK-RV64-NEXT: entry:
1403 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
1404 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1406 vint8m8_t
test_vslidedown_vx_i8m8_tumu(vbool1_t mask
, vint8m8_t maskedoff
, vint8m8_t src
, size_t offset
, size_t vl
) {
1407 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1410 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslidedown_vx_i16mf4_tumu
1411 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1412 // CHECK-RV64-NEXT: entry:
1413 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1414 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1416 vint16mf4_t
test_vslidedown_vx_i16mf4_tumu(vbool64_t mask
, vint16mf4_t maskedoff
, vint16mf4_t src
, size_t offset
, size_t vl
) {
1417 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1420 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslidedown_vx_i16mf2_tumu
1421 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1422 // CHECK-RV64-NEXT: entry:
1423 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1424 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1426 vint16mf2_t
test_vslidedown_vx_i16mf2_tumu(vbool32_t mask
, vint16mf2_t maskedoff
, vint16mf2_t src
, size_t offset
, size_t vl
) {
1427 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1430 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslidedown_vx_i16m1_tumu
1431 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1432 // CHECK-RV64-NEXT: entry:
1433 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1434 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1436 vint16m1_t
test_vslidedown_vx_i16m1_tumu(vbool16_t mask
, vint16m1_t maskedoff
, vint16m1_t src
, size_t offset
, size_t vl
) {
1437 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1440 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslidedown_vx_i16m2_tumu
1441 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1442 // CHECK-RV64-NEXT: entry:
1443 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1444 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1446 vint16m2_t
test_vslidedown_vx_i16m2_tumu(vbool8_t mask
, vint16m2_t maskedoff
, vint16m2_t src
, size_t offset
, size_t vl
) {
1447 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1450 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslidedown_vx_i16m4_tumu
1451 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1452 // CHECK-RV64-NEXT: entry:
1453 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
1454 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1456 vint16m4_t
test_vslidedown_vx_i16m4_tumu(vbool4_t mask
, vint16m4_t maskedoff
, vint16m4_t src
, size_t offset
, size_t vl
) {
1457 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1460 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslidedown_vx_i16m8_tumu
1461 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1462 // CHECK-RV64-NEXT: entry:
1463 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
1464 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1466 vint16m8_t
test_vslidedown_vx_i16m8_tumu(vbool2_t mask
, vint16m8_t maskedoff
, vint16m8_t src
, size_t offset
, size_t vl
) {
1467 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1470 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslidedown_vx_i32mf2_tumu
1471 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1472 // CHECK-RV64-NEXT: entry:
1473 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1474 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1476 vint32mf2_t
test_vslidedown_vx_i32mf2_tumu(vbool64_t mask
, vint32mf2_t maskedoff
, vint32mf2_t src
, size_t offset
, size_t vl
) {
1477 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1480 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslidedown_vx_i32m1_tumu
1481 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1482 // CHECK-RV64-NEXT: entry:
1483 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1484 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1486 vint32m1_t
test_vslidedown_vx_i32m1_tumu(vbool32_t mask
, vint32m1_t maskedoff
, vint32m1_t src
, size_t offset
, size_t vl
) {
1487 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1490 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslidedown_vx_i32m2_tumu
1491 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1492 // CHECK-RV64-NEXT: entry:
1493 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1494 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1496 vint32m2_t
test_vslidedown_vx_i32m2_tumu(vbool16_t mask
, vint32m2_t maskedoff
, vint32m2_t src
, size_t offset
, size_t vl
) {
1497 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1500 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslidedown_vx_i32m4_tumu
1501 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1502 // CHECK-RV64-NEXT: entry:
1503 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1504 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1506 vint32m4_t
test_vslidedown_vx_i32m4_tumu(vbool8_t mask
, vint32m4_t maskedoff
, vint32m4_t src
, size_t offset
, size_t vl
) {
1507 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1510 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslidedown_vx_i32m8_tumu
1511 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1512 // CHECK-RV64-NEXT: entry:
1513 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
1514 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1516 vint32m8_t
test_vslidedown_vx_i32m8_tumu(vbool4_t mask
, vint32m8_t maskedoff
, vint32m8_t src
, size_t offset
, size_t vl
) {
1517 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1520 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslidedown_vx_i64m1_tumu
1521 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1522 // CHECK-RV64-NEXT: entry:
1523 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1524 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1526 vint64m1_t
test_vslidedown_vx_i64m1_tumu(vbool64_t mask
, vint64m1_t maskedoff
, vint64m1_t src
, size_t offset
, size_t vl
) {
1527 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1530 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslidedown_vx_i64m2_tumu
1531 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1532 // CHECK-RV64-NEXT: entry:
1533 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1534 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1536 vint64m2_t
test_vslidedown_vx_i64m2_tumu(vbool32_t mask
, vint64m2_t maskedoff
, vint64m2_t src
, size_t offset
, size_t vl
) {
1537 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1540 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslidedown_vx_i64m4_tumu
1541 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1542 // CHECK-RV64-NEXT: entry:
1543 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1544 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1546 vint64m4_t
test_vslidedown_vx_i64m4_tumu(vbool16_t mask
, vint64m4_t maskedoff
, vint64m4_t src
, size_t offset
, size_t vl
) {
1547 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1550 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslidedown_vx_i64m8_tumu
1551 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1552 // CHECK-RV64-NEXT: entry:
1553 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1554 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1556 vint64m8_t
test_vslidedown_vx_i64m8_tumu(vbool8_t mask
, vint64m8_t maskedoff
, vint64m8_t src
, size_t offset
, size_t vl
) {
1557 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1560 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslidedown_vx_u8mf8_tumu
1561 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1562 // CHECK-RV64-NEXT: entry:
1563 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1564 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
1566 vuint8mf8_t
test_vslidedown_vx_u8mf8_tumu(vbool64_t mask
, vuint8mf8_t maskedoff
, vuint8mf8_t src
, size_t offset
, size_t vl
) {
1567 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1570 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslidedown_vx_u8mf4_tumu
1571 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1572 // CHECK-RV64-NEXT: entry:
1573 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1574 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
1576 vuint8mf4_t
test_vslidedown_vx_u8mf4_tumu(vbool32_t mask
, vuint8mf4_t maskedoff
, vuint8mf4_t src
, size_t offset
, size_t vl
) {
1577 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1580 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslidedown_vx_u8mf2_tumu
1581 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1582 // CHECK-RV64-NEXT: entry:
1583 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1584 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
1586 vuint8mf2_t
test_vslidedown_vx_u8mf2_tumu(vbool16_t mask
, vuint8mf2_t maskedoff
, vuint8mf2_t src
, size_t offset
, size_t vl
) {
1587 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1590 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslidedown_vx_u8m1_tumu
1591 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1592 // CHECK-RV64-NEXT: entry:
1593 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1594 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1596 vuint8m1_t
test_vslidedown_vx_u8m1_tumu(vbool8_t mask
, vuint8m1_t maskedoff
, vuint8m1_t src
, size_t offset
, size_t vl
) {
1597 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1600 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslidedown_vx_u8m2_tumu
1601 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1602 // CHECK-RV64-NEXT: entry:
1603 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
1604 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1606 vuint8m2_t
test_vslidedown_vx_u8m2_tumu(vbool4_t mask
, vuint8m2_t maskedoff
, vuint8m2_t src
, size_t offset
, size_t vl
) {
1607 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1610 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslidedown_vx_u8m4_tumu
1611 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1612 // CHECK-RV64-NEXT: entry:
1613 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
1614 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1616 vuint8m4_t
test_vslidedown_vx_u8m4_tumu(vbool2_t mask
, vuint8m4_t maskedoff
, vuint8m4_t src
, size_t offset
, size_t vl
) {
1617 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1620 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslidedown_vx_u8m8_tumu
1621 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1622 // CHECK-RV64-NEXT: entry:
1623 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
1624 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1626 vuint8m8_t
test_vslidedown_vx_u8m8_tumu(vbool1_t mask
, vuint8m8_t maskedoff
, vuint8m8_t src
, size_t offset
, size_t vl
) {
1627 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1630 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslidedown_vx_u16mf4_tumu
1631 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1632 // CHECK-RV64-NEXT: entry:
1633 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1634 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1636 vuint16mf4_t
test_vslidedown_vx_u16mf4_tumu(vbool64_t mask
, vuint16mf4_t maskedoff
, vuint16mf4_t src
, size_t offset
, size_t vl
) {
1637 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1640 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslidedown_vx_u16mf2_tumu
1641 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1642 // CHECK-RV64-NEXT: entry:
1643 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1644 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1646 vuint16mf2_t
test_vslidedown_vx_u16mf2_tumu(vbool32_t mask
, vuint16mf2_t maskedoff
, vuint16mf2_t src
, size_t offset
, size_t vl
) {
1647 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1650 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslidedown_vx_u16m1_tumu
1651 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1652 // CHECK-RV64-NEXT: entry:
1653 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1654 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1656 vuint16m1_t
test_vslidedown_vx_u16m1_tumu(vbool16_t mask
, vuint16m1_t maskedoff
, vuint16m1_t src
, size_t offset
, size_t vl
) {
1657 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1660 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslidedown_vx_u16m2_tumu
1661 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1662 // CHECK-RV64-NEXT: entry:
1663 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1664 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1666 vuint16m2_t
test_vslidedown_vx_u16m2_tumu(vbool8_t mask
, vuint16m2_t maskedoff
, vuint16m2_t src
, size_t offset
, size_t vl
) {
1667 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1670 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslidedown_vx_u16m4_tumu
1671 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1672 // CHECK-RV64-NEXT: entry:
1673 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
1674 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1676 vuint16m4_t
test_vslidedown_vx_u16m4_tumu(vbool4_t mask
, vuint16m4_t maskedoff
, vuint16m4_t src
, size_t offset
, size_t vl
) {
1677 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1680 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslidedown_vx_u16m8_tumu
1681 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1682 // CHECK-RV64-NEXT: entry:
1683 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
1684 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1686 vuint16m8_t
test_vslidedown_vx_u16m8_tumu(vbool2_t mask
, vuint16m8_t maskedoff
, vuint16m8_t src
, size_t offset
, size_t vl
) {
1687 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1690 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslidedown_vx_u32mf2_tumu
1691 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1692 // CHECK-RV64-NEXT: entry:
1693 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1694 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1696 vuint32mf2_t
test_vslidedown_vx_u32mf2_tumu(vbool64_t mask
, vuint32mf2_t maskedoff
, vuint32mf2_t src
, size_t offset
, size_t vl
) {
1697 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1700 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslidedown_vx_u32m1_tumu
1701 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1702 // CHECK-RV64-NEXT: entry:
1703 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1704 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1706 vuint32m1_t
test_vslidedown_vx_u32m1_tumu(vbool32_t mask
, vuint32m1_t maskedoff
, vuint32m1_t src
, size_t offset
, size_t vl
) {
1707 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1710 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslidedown_vx_u32m2_tumu
1711 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1712 // CHECK-RV64-NEXT: entry:
1713 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1714 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1716 vuint32m2_t
test_vslidedown_vx_u32m2_tumu(vbool16_t mask
, vuint32m2_t maskedoff
, vuint32m2_t src
, size_t offset
, size_t vl
) {
1717 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1720 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslidedown_vx_u32m4_tumu
1721 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1722 // CHECK-RV64-NEXT: entry:
1723 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1724 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1726 vuint32m4_t
test_vslidedown_vx_u32m4_tumu(vbool8_t mask
, vuint32m4_t maskedoff
, vuint32m4_t src
, size_t offset
, size_t vl
) {
1727 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1730 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslidedown_vx_u32m8_tumu
1731 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1732 // CHECK-RV64-NEXT: entry:
1733 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
1734 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1736 vuint32m8_t
test_vslidedown_vx_u32m8_tumu(vbool4_t mask
, vuint32m8_t maskedoff
, vuint32m8_t src
, size_t offset
, size_t vl
) {
1737 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1740 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslidedown_vx_u64m1_tumu
1741 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1742 // CHECK-RV64-NEXT: entry:
1743 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
1744 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1746 vuint64m1_t
test_vslidedown_vx_u64m1_tumu(vbool64_t mask
, vuint64m1_t maskedoff
, vuint64m1_t src
, size_t offset
, size_t vl
) {
1747 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1750 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslidedown_vx_u64m2_tumu
1751 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1752 // CHECK-RV64-NEXT: entry:
1753 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
1754 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1756 vuint64m2_t
test_vslidedown_vx_u64m2_tumu(vbool32_t mask
, vuint64m2_t maskedoff
, vuint64m2_t src
, size_t offset
, size_t vl
) {
1757 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1760 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslidedown_vx_u64m4_tumu
1761 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1762 // CHECK-RV64-NEXT: entry:
1763 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
1764 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1766 vuint64m4_t
test_vslidedown_vx_u64m4_tumu(vbool16_t mask
, vuint64m4_t maskedoff
, vuint64m4_t src
, size_t offset
, size_t vl
) {
1767 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1770 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslidedown_vx_u64m8_tumu
1771 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1772 // CHECK-RV64-NEXT: entry:
1773 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
1774 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1776 vuint64m8_t
test_vslidedown_vx_u64m8_tumu(vbool8_t mask
, vuint64m8_t maskedoff
, vuint64m8_t src
, size_t offset
, size_t vl
) {
1777 return __riscv_vslidedown_tumu(mask
, maskedoff
, src
, offset
, vl
);
1780 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vslidedown_vx_f16mf4_mu
1781 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1782 // CHECK-RV64-NEXT: entry:
1783 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
1784 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
1786 vfloat16mf4_t
test_vslidedown_vx_f16mf4_mu(vbool64_t mask
, vfloat16mf4_t maskedoff
, vfloat16mf4_t src
, size_t offset
, size_t vl
) {
1787 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1790 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vslidedown_vx_f16mf2_mu
1791 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1792 // CHECK-RV64-NEXT: entry:
1793 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
1794 // CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
1796 vfloat16mf2_t
test_vslidedown_vx_f16mf2_mu(vbool32_t mask
, vfloat16mf2_t maskedoff
, vfloat16mf2_t src
, size_t offset
, size_t vl
) {
1797 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1800 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vslidedown_vx_f16m1_mu
1801 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1802 // CHECK-RV64-NEXT: entry:
1803 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
1804 // CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
1806 vfloat16m1_t
test_vslidedown_vx_f16m1_mu(vbool16_t mask
, vfloat16m1_t maskedoff
, vfloat16m1_t src
, size_t offset
, size_t vl
) {
1807 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1810 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vslidedown_vx_f16m2_mu
1811 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1812 // CHECK-RV64-NEXT: entry:
1813 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
1814 // CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
1816 vfloat16m2_t
test_vslidedown_vx_f16m2_mu(vbool8_t mask
, vfloat16m2_t maskedoff
, vfloat16m2_t src
, size_t offset
, size_t vl
) {
1817 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1820 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vslidedown_vx_f16m4_mu
1821 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1822 // CHECK-RV64-NEXT: entry:
1823 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
1824 // CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
1826 vfloat16m4_t
test_vslidedown_vx_f16m4_mu(vbool4_t mask
, vfloat16m4_t maskedoff
, vfloat16m4_t src
, size_t offset
, size_t vl
) {
1827 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1830 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vslidedown_vx_f16m8_mu
1831 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1832 // CHECK-RV64-NEXT: entry:
1833 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
1834 // CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
1836 vfloat16m8_t
test_vslidedown_vx_f16m8_mu(vbool2_t mask
, vfloat16m8_t maskedoff
, vfloat16m8_t src
, size_t offset
, size_t vl
) {
1837 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1840 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vslidedown_vx_f32mf2_mu
1841 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1842 // CHECK-RV64-NEXT: entry:
1843 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
1844 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
1846 vfloat32mf2_t
test_vslidedown_vx_f32mf2_mu(vbool64_t mask
, vfloat32mf2_t maskedoff
, vfloat32mf2_t src
, size_t offset
, size_t vl
) {
1847 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1850 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vslidedown_vx_f32m1_mu
1851 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1852 // CHECK-RV64-NEXT: entry:
1853 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
1854 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
1856 vfloat32m1_t
test_vslidedown_vx_f32m1_mu(vbool32_t mask
, vfloat32m1_t maskedoff
, vfloat32m1_t src
, size_t offset
, size_t vl
) {
1857 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1860 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vslidedown_vx_f32m2_mu
1861 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1862 // CHECK-RV64-NEXT: entry:
1863 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
1864 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
1866 vfloat32m2_t
test_vslidedown_vx_f32m2_mu(vbool16_t mask
, vfloat32m2_t maskedoff
, vfloat32m2_t src
, size_t offset
, size_t vl
) {
1867 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1870 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vslidedown_vx_f32m4_mu
1871 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1872 // CHECK-RV64-NEXT: entry:
1873 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
1874 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
1876 vfloat32m4_t
test_vslidedown_vx_f32m4_mu(vbool8_t mask
, vfloat32m4_t maskedoff
, vfloat32m4_t src
, size_t offset
, size_t vl
) {
1877 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1880 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vslidedown_vx_f32m8_mu
1881 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1882 // CHECK-RV64-NEXT: entry:
1883 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
1884 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
1886 vfloat32m8_t
test_vslidedown_vx_f32m8_mu(vbool4_t mask
, vfloat32m8_t maskedoff
, vfloat32m8_t src
, size_t offset
, size_t vl
) {
1887 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1890 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vslidedown_vx_f64m1_mu
1891 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1892 // CHECK-RV64-NEXT: entry:
1893 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
1894 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
1896 vfloat64m1_t
test_vslidedown_vx_f64m1_mu(vbool64_t mask
, vfloat64m1_t maskedoff
, vfloat64m1_t src
, size_t offset
, size_t vl
) {
1897 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1900 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vslidedown_vx_f64m2_mu
1901 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1902 // CHECK-RV64-NEXT: entry:
1903 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
1904 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
1906 vfloat64m2_t
test_vslidedown_vx_f64m2_mu(vbool32_t mask
, vfloat64m2_t maskedoff
, vfloat64m2_t src
, size_t offset
, size_t vl
) {
1907 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1910 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vslidedown_vx_f64m4_mu
1911 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1912 // CHECK-RV64-NEXT: entry:
1913 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
1914 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
1916 vfloat64m4_t
test_vslidedown_vx_f64m4_mu(vbool16_t mask
, vfloat64m4_t maskedoff
, vfloat64m4_t src
, size_t offset
, size_t vl
) {
1917 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1920 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vslidedown_vx_f64m8_mu
1921 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1922 // CHECK-RV64-NEXT: entry:
1923 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
1924 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
1926 vfloat64m8_t
test_vslidedown_vx_f64m8_mu(vbool8_t mask
, vfloat64m8_t maskedoff
, vfloat64m8_t src
, size_t offset
, size_t vl
) {
1927 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1930 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslidedown_vx_i8mf8_mu
1931 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1932 // CHECK-RV64-NEXT: entry:
1933 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
1934 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
1936 vint8mf8_t
test_vslidedown_vx_i8mf8_mu(vbool64_t mask
, vint8mf8_t maskedoff
, vint8mf8_t src
, size_t offset
, size_t vl
) {
1937 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1940 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslidedown_vx_i8mf4_mu
1941 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1942 // CHECK-RV64-NEXT: entry:
1943 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
1944 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
1946 vint8mf4_t
test_vslidedown_vx_i8mf4_mu(vbool32_t mask
, vint8mf4_t maskedoff
, vint8mf4_t src
, size_t offset
, size_t vl
) {
1947 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1950 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslidedown_vx_i8mf2_mu
1951 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1952 // CHECK-RV64-NEXT: entry:
1953 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
1954 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
1956 vint8mf2_t
test_vslidedown_vx_i8mf2_mu(vbool16_t mask
, vint8mf2_t maskedoff
, vint8mf2_t src
, size_t offset
, size_t vl
) {
1957 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1960 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslidedown_vx_i8m1_mu
1961 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1962 // CHECK-RV64-NEXT: entry:
1963 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
1964 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1966 vint8m1_t
test_vslidedown_vx_i8m1_mu(vbool8_t mask
, vint8m1_t maskedoff
, vint8m1_t src
, size_t offset
, size_t vl
) {
1967 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1970 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslidedown_vx_i8m2_mu
1971 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1972 // CHECK-RV64-NEXT: entry:
1973 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
1974 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1976 vint8m2_t
test_vslidedown_vx_i8m2_mu(vbool4_t mask
, vint8m2_t maskedoff
, vint8m2_t src
, size_t offset
, size_t vl
) {
1977 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1980 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslidedown_vx_i8m4_mu
1981 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1982 // CHECK-RV64-NEXT: entry:
1983 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
1984 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1986 vint8m4_t
test_vslidedown_vx_i8m4_mu(vbool2_t mask
, vint8m4_t maskedoff
, vint8m4_t src
, size_t offset
, size_t vl
) {
1987 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
1990 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslidedown_vx_i8m8_mu
1991 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
1992 // CHECK-RV64-NEXT: entry:
1993 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
1994 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1996 vint8m8_t
test_vslidedown_vx_i8m8_mu(vbool1_t mask
, vint8m8_t maskedoff
, vint8m8_t src
, size_t offset
, size_t vl
) {
1997 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2000 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslidedown_vx_i16mf4_mu
2001 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2002 // CHECK-RV64-NEXT: entry:
2003 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
2004 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2006 vint16mf4_t
test_vslidedown_vx_i16mf4_mu(vbool64_t mask
, vint16mf4_t maskedoff
, vint16mf4_t src
, size_t offset
, size_t vl
) {
2007 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2010 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslidedown_vx_i16mf2_mu
2011 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2012 // CHECK-RV64-NEXT: entry:
2013 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
2014 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2016 vint16mf2_t
test_vslidedown_vx_i16mf2_mu(vbool32_t mask
, vint16mf2_t maskedoff
, vint16mf2_t src
, size_t offset
, size_t vl
) {
2017 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2020 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslidedown_vx_i16m1_mu
2021 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2022 // CHECK-RV64-NEXT: entry:
2023 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
2024 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2026 vint16m1_t
test_vslidedown_vx_i16m1_mu(vbool16_t mask
, vint16m1_t maskedoff
, vint16m1_t src
, size_t offset
, size_t vl
) {
2027 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2030 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslidedown_vx_i16m2_mu
2031 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2032 // CHECK-RV64-NEXT: entry:
2033 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
2034 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2036 vint16m2_t
test_vslidedown_vx_i16m2_mu(vbool8_t mask
, vint16m2_t maskedoff
, vint16m2_t src
, size_t offset
, size_t vl
) {
2037 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2040 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslidedown_vx_i16m4_mu
2041 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2042 // CHECK-RV64-NEXT: entry:
2043 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
2044 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2046 vint16m4_t
test_vslidedown_vx_i16m4_mu(vbool4_t mask
, vint16m4_t maskedoff
, vint16m4_t src
, size_t offset
, size_t vl
) {
2047 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2050 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslidedown_vx_i16m8_mu
2051 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2052 // CHECK-RV64-NEXT: entry:
2053 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
2054 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2056 vint16m8_t
test_vslidedown_vx_i16m8_mu(vbool2_t mask
, vint16m8_t maskedoff
, vint16m8_t src
, size_t offset
, size_t vl
) {
2057 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2060 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslidedown_vx_i32mf2_mu
2061 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2062 // CHECK-RV64-NEXT: entry:
2063 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
2064 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2066 vint32mf2_t
test_vslidedown_vx_i32mf2_mu(vbool64_t mask
, vint32mf2_t maskedoff
, vint32mf2_t src
, size_t offset
, size_t vl
) {
2067 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2070 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslidedown_vx_i32m1_mu
2071 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2072 // CHECK-RV64-NEXT: entry:
2073 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
2074 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2076 vint32m1_t
test_vslidedown_vx_i32m1_mu(vbool32_t mask
, vint32m1_t maskedoff
, vint32m1_t src
, size_t offset
, size_t vl
) {
2077 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2080 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslidedown_vx_i32m2_mu
2081 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2082 // CHECK-RV64-NEXT: entry:
2083 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
2084 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2086 vint32m2_t
test_vslidedown_vx_i32m2_mu(vbool16_t mask
, vint32m2_t maskedoff
, vint32m2_t src
, size_t offset
, size_t vl
) {
2087 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2090 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslidedown_vx_i32m4_mu
2091 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2092 // CHECK-RV64-NEXT: entry:
2093 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
2094 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2096 vint32m4_t
test_vslidedown_vx_i32m4_mu(vbool8_t mask
, vint32m4_t maskedoff
, vint32m4_t src
, size_t offset
, size_t vl
) {
2097 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2100 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslidedown_vx_i32m8_mu
2101 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2102 // CHECK-RV64-NEXT: entry:
2103 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
2104 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2106 vint32m8_t
test_vslidedown_vx_i32m8_mu(vbool4_t mask
, vint32m8_t maskedoff
, vint32m8_t src
, size_t offset
, size_t vl
) {
2107 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2110 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslidedown_vx_i64m1_mu
2111 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2112 // CHECK-RV64-NEXT: entry:
2113 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
2114 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2116 vint64m1_t
test_vslidedown_vx_i64m1_mu(vbool64_t mask
, vint64m1_t maskedoff
, vint64m1_t src
, size_t offset
, size_t vl
) {
2117 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2120 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslidedown_vx_i64m2_mu
2121 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2122 // CHECK-RV64-NEXT: entry:
2123 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
2124 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2126 vint64m2_t
test_vslidedown_vx_i64m2_mu(vbool32_t mask
, vint64m2_t maskedoff
, vint64m2_t src
, size_t offset
, size_t vl
) {
2127 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2130 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslidedown_vx_i64m4_mu
2131 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2132 // CHECK-RV64-NEXT: entry:
2133 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
2134 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2136 vint64m4_t
test_vslidedown_vx_i64m4_mu(vbool16_t mask
, vint64m4_t maskedoff
, vint64m4_t src
, size_t offset
, size_t vl
) {
2137 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2140 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslidedown_vx_i64m8_mu
2141 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2142 // CHECK-RV64-NEXT: entry:
2143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
2144 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2146 vint64m8_t
test_vslidedown_vx_i64m8_mu(vbool8_t mask
, vint64m8_t maskedoff
, vint64m8_t src
, size_t offset
, size_t vl
) {
2147 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2150 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslidedown_vx_u8mf8_mu
2151 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2152 // CHECK-RV64-NEXT: entry:
2153 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
2154 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
2156 vuint8mf8_t
test_vslidedown_vx_u8mf8_mu(vbool64_t mask
, vuint8mf8_t maskedoff
, vuint8mf8_t src
, size_t offset
, size_t vl
) {
2157 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2160 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslidedown_vx_u8mf4_mu
2161 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2162 // CHECK-RV64-NEXT: entry:
2163 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
2164 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
2166 vuint8mf4_t
test_vslidedown_vx_u8mf4_mu(vbool32_t mask
, vuint8mf4_t maskedoff
, vuint8mf4_t src
, size_t offset
, size_t vl
) {
2167 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2170 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslidedown_vx_u8mf2_mu
2171 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2172 // CHECK-RV64-NEXT: entry:
2173 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
2174 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
2176 vuint8mf2_t
test_vslidedown_vx_u8mf2_mu(vbool16_t mask
, vuint8mf2_t maskedoff
, vuint8mf2_t src
, size_t offset
, size_t vl
) {
2177 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2180 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslidedown_vx_u8m1_mu
2181 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2182 // CHECK-RV64-NEXT: entry:
2183 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
2184 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
2186 vuint8m1_t
test_vslidedown_vx_u8m1_mu(vbool8_t mask
, vuint8m1_t maskedoff
, vuint8m1_t src
, size_t offset
, size_t vl
) {
2187 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2190 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslidedown_vx_u8m2_mu
2191 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2192 // CHECK-RV64-NEXT: entry:
2193 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
2194 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
2196 vuint8m2_t
test_vslidedown_vx_u8m2_mu(vbool4_t mask
, vuint8m2_t maskedoff
, vuint8m2_t src
, size_t offset
, size_t vl
) {
2197 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2200 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslidedown_vx_u8m4_mu
2201 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2202 // CHECK-RV64-NEXT: entry:
2203 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
2204 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
2206 vuint8m4_t
test_vslidedown_vx_u8m4_mu(vbool2_t mask
, vuint8m4_t maskedoff
, vuint8m4_t src
, size_t offset
, size_t vl
) {
2207 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2210 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslidedown_vx_u8m8_mu
2211 // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2212 // CHECK-RV64-NEXT: entry:
2213 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
2214 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
2216 vuint8m8_t
test_vslidedown_vx_u8m8_mu(vbool1_t mask
, vuint8m8_t maskedoff
, vuint8m8_t src
, size_t offset
, size_t vl
) {
2217 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2220 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslidedown_vx_u16mf4_mu
2221 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2222 // CHECK-RV64-NEXT: entry:
2223 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
2224 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2226 vuint16mf4_t
test_vslidedown_vx_u16mf4_mu(vbool64_t mask
, vuint16mf4_t maskedoff
, vuint16mf4_t src
, size_t offset
, size_t vl
) {
2227 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2230 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslidedown_vx_u16mf2_mu
2231 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2232 // CHECK-RV64-NEXT: entry:
2233 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
2234 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2236 vuint16mf2_t
test_vslidedown_vx_u16mf2_mu(vbool32_t mask
, vuint16mf2_t maskedoff
, vuint16mf2_t src
, size_t offset
, size_t vl
) {
2237 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2240 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslidedown_vx_u16m1_mu
2241 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2242 // CHECK-RV64-NEXT: entry:
2243 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
2244 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2246 vuint16m1_t
test_vslidedown_vx_u16m1_mu(vbool16_t mask
, vuint16m1_t maskedoff
, vuint16m1_t src
, size_t offset
, size_t vl
) {
2247 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2250 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslidedown_vx_u16m2_mu
2251 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2252 // CHECK-RV64-NEXT: entry:
2253 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
2254 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2256 vuint16m2_t
test_vslidedown_vx_u16m2_mu(vbool8_t mask
, vuint16m2_t maskedoff
, vuint16m2_t src
, size_t offset
, size_t vl
) {
2257 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2260 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslidedown_vx_u16m4_mu
2261 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2262 // CHECK-RV64-NEXT: entry:
2263 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
2264 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2266 vuint16m4_t
test_vslidedown_vx_u16m4_mu(vbool4_t mask
, vuint16m4_t maskedoff
, vuint16m4_t src
, size_t offset
, size_t vl
) {
2267 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2270 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslidedown_vx_u16m8_mu
2271 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2272 // CHECK-RV64-NEXT: entry:
2273 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
2274 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2276 vuint16m8_t
test_vslidedown_vx_u16m8_mu(vbool2_t mask
, vuint16m8_t maskedoff
, vuint16m8_t src
, size_t offset
, size_t vl
) {
2277 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2280 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslidedown_vx_u32mf2_mu
2281 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2282 // CHECK-RV64-NEXT: entry:
2283 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
2284 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2286 vuint32mf2_t
test_vslidedown_vx_u32mf2_mu(vbool64_t mask
, vuint32mf2_t maskedoff
, vuint32mf2_t src
, size_t offset
, size_t vl
) {
2287 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2290 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslidedown_vx_u32m1_mu
2291 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2292 // CHECK-RV64-NEXT: entry:
2293 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
2294 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2296 vuint32m1_t
test_vslidedown_vx_u32m1_mu(vbool32_t mask
, vuint32m1_t maskedoff
, vuint32m1_t src
, size_t offset
, size_t vl
) {
2297 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2300 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslidedown_vx_u32m2_mu
2301 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2302 // CHECK-RV64-NEXT: entry:
2303 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
2304 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2306 vuint32m2_t
test_vslidedown_vx_u32m2_mu(vbool16_t mask
, vuint32m2_t maskedoff
, vuint32m2_t src
, size_t offset
, size_t vl
) {
2307 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2310 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslidedown_vx_u32m4_mu
2311 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2312 // CHECK-RV64-NEXT: entry:
2313 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
2314 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2316 vuint32m4_t
test_vslidedown_vx_u32m4_mu(vbool8_t mask
, vuint32m4_t maskedoff
, vuint32m4_t src
, size_t offset
, size_t vl
) {
2317 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2320 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslidedown_vx_u32m8_mu
2321 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2322 // CHECK-RV64-NEXT: entry:
2323 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
2324 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2326 vuint32m8_t
test_vslidedown_vx_u32m8_mu(vbool4_t mask
, vuint32m8_t maskedoff
, vuint32m8_t src
, size_t offset
, size_t vl
) {
2327 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2330 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslidedown_vx_u64m1_mu
2331 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2332 // CHECK-RV64-NEXT: entry:
2333 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
2334 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2336 vuint64m1_t
test_vslidedown_vx_u64m1_mu(vbool64_t mask
, vuint64m1_t maskedoff
, vuint64m1_t src
, size_t offset
, size_t vl
) {
2337 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2340 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslidedown_vx_u64m2_mu
2341 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2342 // CHECK-RV64-NEXT: entry:
2343 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
2344 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2346 vuint64m2_t
test_vslidedown_vx_u64m2_mu(vbool32_t mask
, vuint64m2_t maskedoff
, vuint64m2_t src
, size_t offset
, size_t vl
) {
2347 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2350 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslidedown_vx_u64m4_mu
2351 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2352 // CHECK-RV64-NEXT: entry:
2353 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
2354 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2356 vuint64m4_t
test_vslidedown_vx_u64m4_mu(vbool16_t mask
, vuint64m4_t maskedoff
, vuint64m4_t src
, size_t offset
, size_t vl
) {
2357 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);
2360 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslidedown_vx_u64m8_mu
2361 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2362 // CHECK-RV64-NEXT: entry:
2363 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
2364 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2366 vuint64m8_t
test_vslidedown_vx_u64m8_mu(vbool8_t mask
, vuint64m8_t maskedoff
, vuint64m8_t src
, size_t offset
, size_t vl
) {
2367 return __riscv_vslidedown_mu(mask
, maskedoff
, src
, offset
, vl
);