// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
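
// Each test below calls the overloaded __riscv_vset intrinsic to insert a
// smaller-LMUL vector (val) into part 0 of a larger-LMUL vector (dest), and
// checks that it lowers to an llvm.vector.insert call at index 0. The size_t
// index parameter is unused; the insert position is the constant 0. Tests are
// grouped by element type, starting with the signed 8-bit (vint8) cases.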
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) {
  return __riscv_vset(dest, 0, val);
}

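// Unsigned 8-bit element (vuint8) cases.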
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) {
  return __riscv_vset(dest, 0, val);
}

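// Signed 16-bit element (vint16) cases.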
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) {
  return __riscv_vset(dest, 0, val);
}

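// Unsigned 16-bit element (vuint16) cases.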
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) {
  return __riscv_vset(dest, 0, val);
}

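// Signed 32-bit element (vint32) cases.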
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) {
  return __riscv_vset(dest, 0, val);
}

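// Unsigned 32-bit element (vuint32) cases.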
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) {
  return __riscv_vset(dest, 0, val);
}

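// Single-precision floating-point (vfloat32) cases.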
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) {
  return __riscv_vset(dest, 0, val);
}

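// Signed 64-bit element (vint64) cases.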
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) {
  return __riscv_vset(dest, 0, val);
}

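// Unsigned 64-bit element (vuint64) cases.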
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) {
  return __riscv_vset(dest, 0, val);
}

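// Double-precision floating-point (vfloat64) cases.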
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) {
  return __riscv_vset(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) {
  return __riscv_vset(dest, 0, val);
}