// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
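
// Each vlmul_trunc intrinsic narrows a value to a smaller LMUL by keeping the
// low-order part of the source register group, so every call below lowers to
// a single @llvm.vector.extract of the low subvector (index 0).
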
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vlmul_trunc_v_f16mf2_f16mf4
// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.vector.extract.nxv1f16.nxv2f16(<vscale x 2 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) {
  return __riscv_vlmul_trunc_v_f16mf2_f16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vlmul_trunc_v_f16m1_f16mf4
// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.vector.extract.nxv1f16.nxv4f16(<vscale x 4 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) {
  return __riscv_vlmul_trunc_v_f16m1_f16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vlmul_trunc_v_f16m1_f16mf2
// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv4f16(<vscale x 4 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) {
  return __riscv_vlmul_trunc_v_f16m1_f16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vlmul_trunc_v_f16m2_f16mf4
// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.vector.extract.nxv1f16.nxv8f16(<vscale x 8 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) {
  return __riscv_vlmul_trunc_v_f16m2_f16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vlmul_trunc_v_f16m2_f16mf2
// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) {
  return __riscv_vlmul_trunc_v_f16m2_f16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vlmul_trunc_v_f16m2_f16m1
// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) {
  return __riscv_vlmul_trunc_v_f16m2_f16m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vlmul_trunc_v_f16m4_f16mf4
// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.vector.extract.nxv1f16.nxv16f16(<vscale x 16 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) {
  return __riscv_vlmul_trunc_v_f16m4_f16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vlmul_trunc_v_f16m4_f16mf2
// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) {
  return __riscv_vlmul_trunc_v_f16m4_f16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vlmul_trunc_v_f16m4_f16m1
// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) {
  return __riscv_vlmul_trunc_v_f16m4_f16m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vlmul_trunc_v_f16m4_f16m2
// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) {
  return __riscv_vlmul_trunc_v_f16m4_f16m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vlmul_trunc_v_f16m8_f16mf4
// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.vector.extract.nxv1f16.nxv32f16(<vscale x 32 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) {
  return __riscv_vlmul_trunc_v_f16m8_f16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vlmul_trunc_v_f16m8_f16mf2
// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv32f16(<vscale x 32 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) {
  return __riscv_vlmul_trunc_v_f16m8_f16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vlmul_trunc_v_f16m8_f16m1
// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv32f16(<vscale x 32 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) {
  return __riscv_vlmul_trunc_v_f16m8_f16m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vlmul_trunc_v_f16m8_f16m2
// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) {
  return __riscv_vlmul_trunc_v_f16m8_f16m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vlmul_trunc_v_f16m8_f16m4
// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.vector.extract.nxv16f16.nxv32f16(<vscale x 32 x half> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) {
  return __riscv_vlmul_trunc_v_f16m8_f16m4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vlmul_trunc_v_f32m1_f32mf2
// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.vector.extract.nxv1f32.nxv2f32(<vscale x 2 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) {
  return __riscv_vlmul_trunc_v_f32m1_f32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vlmul_trunc_v_f32m2_f32mf2
// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.vector.extract.nxv1f32.nxv4f32(<vscale x 4 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) {
  return __riscv_vlmul_trunc_v_f32m2_f32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vlmul_trunc_v_f32m2_f32m1
// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) {
  return __riscv_vlmul_trunc_v_f32m2_f32m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vlmul_trunc_v_f32m4_f32mf2
// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.vector.extract.nxv1f32.nxv8f32(<vscale x 8 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) {
  return __riscv_vlmul_trunc_v_f32m4_f32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vlmul_trunc_v_f32m4_f32m1
// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) {
  return __riscv_vlmul_trunc_v_f32m4_f32m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vlmul_trunc_v_f32m4_f32m2
// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) {
  return __riscv_vlmul_trunc_v_f32m4_f32m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vlmul_trunc_v_f32m8_f32mf2
// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.vector.extract.nxv1f32.nxv16f32(<vscale x 16 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) {
  return __riscv_vlmul_trunc_v_f32m8_f32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vlmul_trunc_v_f32m8_f32m1
// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) {
  return __riscv_vlmul_trunc_v_f32m8_f32m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vlmul_trunc_v_f32m8_f32m2
// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) {
  return __riscv_vlmul_trunc_v_f32m8_f32m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vlmul_trunc_v_f32m8_f32m4
// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) {
  return __riscv_vlmul_trunc_v_f32m8_f32m4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vlmul_trunc_v_f64m2_f64m1
// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) {
  return __riscv_vlmul_trunc_v_f64m2_f64m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vlmul_trunc_v_f64m4_f64m1
// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) {
  return __riscv_vlmul_trunc_v_f64m4_f64m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vlmul_trunc_v_f64m4_f64m2
// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) {
  return __riscv_vlmul_trunc_v_f64m4_f64m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vlmul_trunc_v_f64m8_f64m1
// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) {
  return __riscv_vlmul_trunc_v_f64m8_f64m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vlmul_trunc_v_f64m8_f64m2
// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) {
  return __riscv_vlmul_trunc_v_f64m8_f64m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vlmul_trunc_v_f64m8_f64m4
// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) {
  return __riscv_vlmul_trunc_v_f64m8_f64m4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_i8mf4_i8mf8
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
  return __riscv_vlmul_trunc_v_i8mf4_i8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_i8mf2_i8mf8
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
  return __riscv_vlmul_trunc_v_i8mf2_i8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_i8mf2_i8mf4
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
  return __riscv_vlmul_trunc_v_i8mf2_i8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_i8m1_i8mf8
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
  return __riscv_vlmul_trunc_v_i8m1_i8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_i8m1_i8mf4
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
  return __riscv_vlmul_trunc_v_i8m1_i8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vlmul_trunc_v_i8m1_i8mf2
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
  return __riscv_vlmul_trunc_v_i8m1_i8mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_i8m2_i8mf8
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
  return __riscv_vlmul_trunc_v_i8m2_i8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_i8m2_i8mf4
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
  return __riscv_vlmul_trunc_v_i8m2_i8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vlmul_trunc_v_i8m2_i8mf2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
  return __riscv_vlmul_trunc_v_i8m2_i8mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vlmul_trunc_v_i8m2_i8m1
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
  return __riscv_vlmul_trunc_v_i8m2_i8m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_i8m4_i8mf8
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
  return __riscv_vlmul_trunc_v_i8m4_i8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_i8m4_i8mf4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
  return __riscv_vlmul_trunc_v_i8m4_i8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vlmul_trunc_v_i8m4_i8mf2
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
  return __riscv_vlmul_trunc_v_i8m4_i8mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vlmul_trunc_v_i8m4_i8m1
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
  return __riscv_vlmul_trunc_v_i8m4_i8m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vlmul_trunc_v_i8m4_i8m2
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
  return __riscv_vlmul_trunc_v_i8m4_i8m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_i8m8_i8mf8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
  return __riscv_vlmul_trunc_v_i8m8_i8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_i8m8_i8mf4
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
  return __riscv_vlmul_trunc_v_i8m8_i8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vlmul_trunc_v_i8m8_i8mf2
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
  return __riscv_vlmul_trunc_v_i8m8_i8mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vlmul_trunc_v_i8m8_i8m1
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
  return __riscv_vlmul_trunc_v_i8m8_i8m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vlmul_trunc_v_i8m8_i8m2
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
  return __riscv_vlmul_trunc_v_i8m8_i8m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vlmul_trunc_v_i8m8_i8m4
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
  return __riscv_vlmul_trunc_v_i8m8_i8m4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_i16mf2_i16mf4
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
  return __riscv_vlmul_trunc_v_i16mf2_i16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_i16m1_i16mf4
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
  return __riscv_vlmul_trunc_v_i16m1_i16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vlmul_trunc_v_i16m1_i16mf2
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
  return __riscv_vlmul_trunc_v_i16m1_i16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_i16m2_i16mf4
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
  return __riscv_vlmul_trunc_v_i16m2_i16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vlmul_trunc_v_i16m2_i16mf2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
  return __riscv_vlmul_trunc_v_i16m2_i16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vlmul_trunc_v_i16m2_i16m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
  return __riscv_vlmul_trunc_v_i16m2_i16m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_i16m4_i16mf4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
  return __riscv_vlmul_trunc_v_i16m4_i16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vlmul_trunc_v_i16m4_i16mf2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
  return __riscv_vlmul_trunc_v_i16m4_i16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vlmul_trunc_v_i16m4_i16m1
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
  return __riscv_vlmul_trunc_v_i16m4_i16m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vlmul_trunc_v_i16m4_i16m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
  return __riscv_vlmul_trunc_v_i16m4_i16m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_i16m8_i16mf4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
  return __riscv_vlmul_trunc_v_i16m8_i16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vlmul_trunc_v_i16m8_i16mf2
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
  return __riscv_vlmul_trunc_v_i16m8_i16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vlmul_trunc_v_i16m8_i16m1
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
  return __riscv_vlmul_trunc_v_i16m8_i16m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vlmul_trunc_v_i16m8_i16m2
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
  return __riscv_vlmul_trunc_v_i16m8_i16m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vlmul_trunc_v_i16m8_i16m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
  return __riscv_vlmul_trunc_v_i16m8_i16m4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vlmul_trunc_v_i32m1_i32mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
  return __riscv_vlmul_trunc_v_i32m1_i32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vlmul_trunc_v_i32m2_i32mf2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
  return __riscv_vlmul_trunc_v_i32m2_i32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vlmul_trunc_v_i32m2_i32m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
  return __riscv_vlmul_trunc_v_i32m2_i32m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vlmul_trunc_v_i32m4_i32mf2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
  return __riscv_vlmul_trunc_v_i32m4_i32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vlmul_trunc_v_i32m4_i32m1
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
  return __riscv_vlmul_trunc_v_i32m4_i32m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vlmul_trunc_v_i32m4_i32m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
  return __riscv_vlmul_trunc_v_i32m4_i32m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vlmul_trunc_v_i32m8_i32mf2
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
  return __riscv_vlmul_trunc_v_i32m8_i32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vlmul_trunc_v_i32m8_i32m1
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
  return __riscv_vlmul_trunc_v_i32m8_i32m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vlmul_trunc_v_i32m8_i32m2
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
  return __riscv_vlmul_trunc_v_i32m8_i32m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vlmul_trunc_v_i32m8_i32m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
  return __riscv_vlmul_trunc_v_i32m8_i32m4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vlmul_trunc_v_i64m2_i64m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
  return __riscv_vlmul_trunc_v_i64m2_i64m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vlmul_trunc_v_i64m4_i64m1
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
  return __riscv_vlmul_trunc_v_i64m4_i64m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vlmul_trunc_v_i64m4_i64m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
  return __riscv_vlmul_trunc_v_i64m4_i64m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vlmul_trunc_v_i64m8_i64m1
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
  return __riscv_vlmul_trunc_v_i64m8_i64m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vlmul_trunc_v_i64m8_i64m2
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
  return __riscv_vlmul_trunc_v_i64m8_i64m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vlmul_trunc_v_i64m8_i64m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
  return __riscv_vlmul_trunc_v_i64m8_i64m4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_u8mf4_u8mf8
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
  return __riscv_vlmul_trunc_v_u8mf4_u8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_u8mf2_u8mf8
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
  return __riscv_vlmul_trunc_v_u8mf2_u8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_u8mf2_u8mf4
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
  return __riscv_vlmul_trunc_v_u8mf2_u8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_u8m1_u8mf8
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
  return __riscv_vlmul_trunc_v_u8m1_u8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_u8m1_u8mf4
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
  return __riscv_vlmul_trunc_v_u8m1_u8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vlmul_trunc_v_u8m1_u8mf2
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
  return __riscv_vlmul_trunc_v_u8m1_u8mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_u8m2_u8mf8
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
  return __riscv_vlmul_trunc_v_u8m2_u8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_u8m2_u8mf4
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
  return __riscv_vlmul_trunc_v_u8m2_u8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vlmul_trunc_v_u8m2_u8mf2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
  return __riscv_vlmul_trunc_v_u8m2_u8mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vlmul_trunc_v_u8m2_u8m1
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
  return __riscv_vlmul_trunc_v_u8m2_u8m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_u8m4_u8mf8
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
  return __riscv_vlmul_trunc_v_u8m4_u8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_u8m4_u8mf4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
  return __riscv_vlmul_trunc_v_u8m4_u8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vlmul_trunc_v_u8m4_u8mf2
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
  return __riscv_vlmul_trunc_v_u8m4_u8mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vlmul_trunc_v_u8m4_u8m1
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
  return __riscv_vlmul_trunc_v_u8m4_u8m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vlmul_trunc_v_u8m4_u8m2
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
  return __riscv_vlmul_trunc_v_u8m4_u8m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vlmul_trunc_v_u8m8_u8mf8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
  return __riscv_vlmul_trunc_v_u8m8_u8mf8(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vlmul_trunc_v_u8m8_u8mf4
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
  return __riscv_vlmul_trunc_v_u8m8_u8mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vlmul_trunc_v_u8m8_u8mf2
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
  return __riscv_vlmul_trunc_v_u8m8_u8mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vlmul_trunc_v_u8m8_u8m1
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
  return __riscv_vlmul_trunc_v_u8m8_u8m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vlmul_trunc_v_u8m8_u8m2
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
  return __riscv_vlmul_trunc_v_u8m8_u8m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vlmul_trunc_v_u8m8_u8m4
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
  return __riscv_vlmul_trunc_v_u8m8_u8m4(op1);
}

1050 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_u16mf2_u16mf4
1051 // CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]]) #[[ATTR0]] {
1052 // CHECK-RV64-NEXT: entry:
1053 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1]], i64 0)
1054 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1056 vuint16mf4_t
test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1
) {
1057 return __riscv_vlmul_trunc_v_u16mf2_u16mf4(op1
);
1060 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_u16m1_u16mf4
1061 // CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]]) #[[ATTR0]] {
1062 // CHECK-RV64-NEXT: entry:
1063 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1]], i64 0)
1064 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1066 vuint16mf4_t
test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1
) {
1067 return __riscv_vlmul_trunc_v_u16m1_u16mf4(op1
);
1070 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vlmul_trunc_v_u16m1_u16mf2
1071 // CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]]) #[[ATTR0]] {
1072 // CHECK-RV64-NEXT: entry:
1073 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1]], i64 0)
1074 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1076 vuint16mf2_t
test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1
) {
1077 return __riscv_vlmul_trunc_v_u16m1_u16mf2(op1
);
1080 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_u16m2_u16mf4
1081 // CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]]) #[[ATTR0]] {
1082 // CHECK-RV64-NEXT: entry:
1083 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1]], i64 0)
1084 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1086 vuint16mf4_t
test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1
) {
1087 return __riscv_vlmul_trunc_v_u16m2_u16mf4(op1
);
1090 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vlmul_trunc_v_u16m2_u16mf2
1091 // CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]]) #[[ATTR0]] {
1092 // CHECK-RV64-NEXT: entry:
1093 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1]], i64 0)
1094 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1096 vuint16mf2_t
test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1
) {
1097 return __riscv_vlmul_trunc_v_u16m2_u16mf2(op1
);
1100 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vlmul_trunc_v_u16m2_u16m1
1101 // CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]]) #[[ATTR0]] {
1102 // CHECK-RV64-NEXT: entry:
1103 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1]], i64 0)
1104 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1106 vuint16m1_t
test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1
) {
1107 return __riscv_vlmul_trunc_v_u16m2_u16m1(op1
);
1110 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_u16m4_u16mf4
1111 // CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]]) #[[ATTR0]] {
1112 // CHECK-RV64-NEXT: entry:
1113 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1]], i64 0)
1114 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1116 vuint16mf4_t
test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1
) {
1117 return __riscv_vlmul_trunc_v_u16m4_u16mf4(op1
);
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vlmul_trunc_v_u16m4_u16mf2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
  return __riscv_vlmul_trunc_v_u16m4_u16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vlmul_trunc_v_u16m4_u16m1
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
  return __riscv_vlmul_trunc_v_u16m4_u16m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vlmul_trunc_v_u16m4_u16m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
  return __riscv_vlmul_trunc_v_u16m4_u16m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vlmul_trunc_v_u16m8_u16mf4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
  return __riscv_vlmul_trunc_v_u16m8_u16mf4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vlmul_trunc_v_u16m8_u16mf2
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
  return __riscv_vlmul_trunc_v_u16m8_u16mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vlmul_trunc_v_u16m8_u16m1
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
  return __riscv_vlmul_trunc_v_u16m8_u16m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vlmul_trunc_v_u16m8_u16m2
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
  return __riscv_vlmul_trunc_v_u16m8_u16m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vlmul_trunc_v_u16m8_u16m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
  return __riscv_vlmul_trunc_v_u16m8_u16m4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vlmul_trunc_v_u32m1_u32mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
  return __riscv_vlmul_trunc_v_u32m1_u32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vlmul_trunc_v_u32m2_u32mf2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
  return __riscv_vlmul_trunc_v_u32m2_u32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vlmul_trunc_v_u32m2_u32m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
  return __riscv_vlmul_trunc_v_u32m2_u32m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vlmul_trunc_v_u32m4_u32mf2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) {
  return __riscv_vlmul_trunc_v_u32m4_u32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vlmul_trunc_v_u32m4_u32m1
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) {
  return __riscv_vlmul_trunc_v_u32m4_u32m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vlmul_trunc_v_u32m4_u32m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) {
  return __riscv_vlmul_trunc_v_u32m4_u32m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vlmul_trunc_v_u32m8_u32mf2
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) {
  return __riscv_vlmul_trunc_v_u32m8_u32mf2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vlmul_trunc_v_u32m8_u32m1
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) {
  return __riscv_vlmul_trunc_v_u32m8_u32m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vlmul_trunc_v_u32m8_u32m2
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) {
  return __riscv_vlmul_trunc_v_u32m8_u32m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vlmul_trunc_v_u32m8_u32m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) {
  return __riscv_vlmul_trunc_v_u32m8_u32m4(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vlmul_trunc_v_u64m2_u64m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) {
  return __riscv_vlmul_trunc_v_u64m2_u64m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vlmul_trunc_v_u64m4_u64m1
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) {
  return __riscv_vlmul_trunc_v_u64m4_u64m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vlmul_trunc_v_u64m4_u64m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) {
  return __riscv_vlmul_trunc_v_u64m4_u64m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vlmul_trunc_v_u64m8_u64m1
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) {
  return __riscv_vlmul_trunc_v_u64m8_u64m1(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vlmul_trunc_v_u64m8_u64m2
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
  return __riscv_vlmul_trunc_v_u64m8_u64m2(op1);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vlmul_trunc_v_u64m8_u64m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) {
  return __riscv_vlmul_trunc_v_u64m8_u64m4(op1);
}