// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

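// This file exercises the overloaded __riscv_vget intrinsics, which extract a
// smaller LMUL register group (m1/m2/m4) from a larger one (m2/m4/m8). Each
// call lowers to the target-independent @llvm.vector.extract intrinsic on the
// corresponding scalable vector types, as the autogenerated checks below show.
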
// CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
  return __riscv_vget_i8m1(src, 0);
}

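// Editorial note (assumption, not autogenerated): every test in this file
// extracts subvector 0, so the @llvm.vector.extract offset is always i64 0.
// A nonzero vget index scales by the result type's minimum element count, so
// a hypothetical variant such as
//
//   vint8m1_t get_upper_half(vint8m2_t src) {
//     return __riscv_vget_i8m1(src, 1);
//   }
//
// would be expected to lower to
// @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> %src, i64 8).
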
// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
  return __riscv_vget_i8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
  return __riscv_vget_i8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
  return __riscv_vget_i8m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
  return __riscv_vget_i8m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) {
  return __riscv_vget_i8m4(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) {
  return __riscv_vget_u8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) {
  return __riscv_vget_u8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) {
  return __riscv_vget_u8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) {
  return __riscv_vget_u8m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) {
  return __riscv_vget_u8m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) {
  return __riscv_vget_u8m4(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) {
  return __riscv_vget_i16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) {
  return __riscv_vget_i16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) {
  return __riscv_vget_i16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) {
  return __riscv_vget_i16m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) {
  return __riscv_vget_i16m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) {
  return __riscv_vget_i16m4(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) {
  return __riscv_vget_u16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) {
  return __riscv_vget_u16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) {
  return __riscv_vget_u16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) {
  return __riscv_vget_u16m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) {
  return __riscv_vget_u16m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) {
  return __riscv_vget_u16m4(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) {
  return __riscv_vget_i32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) {
  return __riscv_vget_i32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) {
  return __riscv_vget_i32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) {
  return __riscv_vget_i32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) {
  return __riscv_vget_i32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) {
  return __riscv_vget_i32m4(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) {
  return __riscv_vget_u32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) {
  return __riscv_vget_u32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) {
  return __riscv_vget_u32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) {
  return __riscv_vget_u32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) {
  return __riscv_vget_u32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) {
  return __riscv_vget_u32m4(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) {
  return __riscv_vget_f32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) {
  return __riscv_vget_f32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) {
  return __riscv_vget_f32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) {
  return __riscv_vget_f32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) {
  return __riscv_vget_f32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) {
  return __riscv_vget_f32m4(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) {
  return __riscv_vget_i64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) {
  return __riscv_vget_i64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) {
  return __riscv_vget_i64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) {
  return __riscv_vget_i64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) {
  return __riscv_vget_i64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) {
  return __riscv_vget_i64m4(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) {
  return __riscv_vget_u64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) {
  return __riscv_vget_u64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) {
  return __riscv_vget_u64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) {
  return __riscv_vget_u64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) {
  return __riscv_vget_u64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
  return __riscv_vget_u64m4(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) {
  return __riscv_vget_f64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) {
  return __riscv_vget_f64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) {
  return __riscv_vget_f64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) {
  return __riscv_vget_f64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) {
  return __riscv_vget_f64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) {
  return __riscv_vget_f64m4(src, 0);
}