; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s

define <vscale x 1 x i8> @sextload_nxv1i1_nxv1i8(ptr %x) {
; CHECK-LABEL: sextload_nxv1i1_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i1>, ptr %x
  %z = sext <vscale x 1 x i1> %y to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %z
}
define <vscale x 1 x i16> @sextload_nxv1i8_nxv1i16(ptr %x) {
; CHECK-LABEL: sextload_nxv1i8_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = sext <vscale x 1 x i8> %y to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %z
}

define <vscale x 1 x i16> @zextload_nxv1i8_nxv1i16(ptr %x) {
; CHECK-LABEL: zextload_nxv1i8_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = zext <vscale x 1 x i8> %y to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %z
}

define <vscale x 1 x i32> @sextload_nxv1i8_nxv1i32(ptr %x) {
; CHECK-LABEL: sextload_nxv1i8_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = sext <vscale x 1 x i8> %y to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %z
}

define <vscale x 1 x i32> @zextload_nxv1i8_nxv1i32(ptr %x) {
; CHECK-LABEL: zextload_nxv1i8_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = zext <vscale x 1 x i8> %y to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %z
}

define <vscale x 1 x i64> @sextload_nxv1i8_nxv1i64(ptr %x) {
; CHECK-LABEL: sextload_nxv1i8_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = sext <vscale x 1 x i8> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define <vscale x 1 x i64> @zextload_nxv1i8_nxv1i64(ptr %x) {
; CHECK-LABEL: zextload_nxv1i8_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = zext <vscale x 1 x i8> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}
define <vscale x 2 x i16> @sextload_nxv2i8_nxv2i16(ptr %x) {
; CHECK-LABEL: sextload_nxv2i8_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = sext <vscale x 2 x i8> %y to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %z
}

define <vscale x 2 x i16> @zextload_nxv2i8_nxv2i16(ptr %x) {
; CHECK-LABEL: zextload_nxv2i8_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = zext <vscale x 2 x i8> %y to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %z
}

define <vscale x 2 x i32> @sextload_nxv2i8_nxv2i32(ptr %x) {
; CHECK-LABEL: sextload_nxv2i8_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = sext <vscale x 2 x i8> %y to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %z
}

define <vscale x 2 x i32> @zextload_nxv2i8_nxv2i32(ptr %x) {
; CHECK-LABEL: zextload_nxv2i8_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = zext <vscale x 2 x i8> %y to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %z
}

define <vscale x 2 x i64> @sextload_nxv2i8_nxv2i64(ptr %x) {
; CHECK-LABEL: sextload_nxv2i8_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = sext <vscale x 2 x i8> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define <vscale x 2 x i64> @zextload_nxv2i8_nxv2i64(ptr %x) {
; CHECK-LABEL: zextload_nxv2i8_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = zext <vscale x 2 x i8> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}
define <vscale x 4 x i16> @sextload_nxv4i8_nxv4i16(ptr %x) {
; CHECK-LABEL: sextload_nxv4i8_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = sext <vscale x 4 x i8> %y to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %z
}

define <vscale x 4 x i16> @zextload_nxv4i8_nxv4i16(ptr %x) {
; CHECK-LABEL: zextload_nxv4i8_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = zext <vscale x 4 x i8> %y to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %z
}

define <vscale x 4 x i32> @sextload_nxv4i8_nxv4i32(ptr %x) {
; CHECK-LABEL: sextload_nxv4i8_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = sext <vscale x 4 x i8> %y to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %z
}

define <vscale x 4 x i32> @zextload_nxv4i8_nxv4i32(ptr %x) {
; CHECK-LABEL: zextload_nxv4i8_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = zext <vscale x 4 x i8> %y to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %z
}

define <vscale x 4 x i64> @sextload_nxv4i8_nxv4i64(ptr %x) {
; CHECK-LABEL: sextload_nxv4i8_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = sext <vscale x 4 x i8> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define <vscale x 4 x i64> @zextload_nxv4i8_nxv4i64(ptr %x) {
; CHECK-LABEL: zextload_nxv4i8_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = zext <vscale x 4 x i8> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}
define <vscale x 8 x i16> @sextload_nxv8i8_nxv8i16(ptr %x) {
; CHECK-LABEL: sextload_nxv8i8_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = sext <vscale x 8 x i8> %y to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %z
}

define <vscale x 8 x i16> @zextload_nxv8i8_nxv8i16(ptr %x) {
; CHECK-LABEL: zextload_nxv8i8_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = zext <vscale x 8 x i8> %y to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %z
}

define <vscale x 8 x i32> @sextload_nxv8i8_nxv8i32(ptr %x) {
; CHECK-LABEL: sextload_nxv8i8_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = sext <vscale x 8 x i8> %y to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %z
}

define <vscale x 8 x i32> @zextload_nxv8i8_nxv8i32(ptr %x) {
; CHECK-LABEL: zextload_nxv8i8_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = zext <vscale x 8 x i8> %y to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %z
}

define <vscale x 8 x i64> @sextload_nxv8i8_nxv8i64(ptr %x) {
; CHECK-LABEL: sextload_nxv8i8_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsext.vf8 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = sext <vscale x 8 x i8> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

define <vscale x 8 x i64> @zextload_nxv8i8_nxv8i64(ptr %x) {
; CHECK-LABEL: zextload_nxv8i8_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vzext.vf8 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = zext <vscale x 8 x i8> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}
define <vscale x 16 x i16> @sextload_nxv16i8_nxv16i16(ptr %x) {
; CHECK-LABEL: sextload_nxv16i8_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i8>, ptr %x
  %z = sext <vscale x 16 x i8> %y to <vscale x 16 x i16>
  ret <vscale x 16 x i16> %z
}

define <vscale x 16 x i16> @zextload_nxv16i8_nxv16i16(ptr %x) {
; CHECK-LABEL: zextload_nxv16i8_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i8>, ptr %x
  %z = zext <vscale x 16 x i8> %y to <vscale x 16 x i16>
  ret <vscale x 16 x i16> %z
}

define <vscale x 16 x i32> @sextload_nxv16i8_nxv16i32(ptr %x) {
; CHECK-LABEL: sextload_nxv16i8_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i8>, ptr %x
  %z = sext <vscale x 16 x i8> %y to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %z
}

define <vscale x 16 x i32> @zextload_nxv16i8_nxv16i32(ptr %x) {
; CHECK-LABEL: zextload_nxv16i8_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vzext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i8>, ptr %x
  %z = zext <vscale x 16 x i8> %y to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %z
}

define <vscale x 32 x i16> @sextload_nxv32i8_nxv32i16(ptr %x) {
; CHECK-LABEL: sextload_nxv32i8_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 32 x i8>, ptr %x
  %z = sext <vscale x 32 x i8> %y to <vscale x 32 x i16>
  ret <vscale x 32 x i16> %z
}

define <vscale x 32 x i16> @zextload_nxv32i8_nxv32i16(ptr %x) {
; CHECK-LABEL: zextload_nxv32i8_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 32 x i8>, ptr %x
  %z = zext <vscale x 32 x i8> %y to <vscale x 32 x i16>
  ret <vscale x 32 x i16> %z
}
define void @truncstore_nxv1i8_nxv1i1(<vscale x 1 x i8> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i8_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v8, v8, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i8> %x to <vscale x 1 x i1>
  store <vscale x 1 x i1> %y, ptr %z
  ret void
}

define void @truncstore_nxv1i16_nxv1i8(<vscale x 1 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i16_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i16> %x to <vscale x 1 x i8>
  store <vscale x 1 x i8> %y, ptr %z
  ret void
}

define <vscale x 1 x i32> @sextload_nxv1i16_nxv1i32(ptr %x) {
; CHECK-LABEL: sextload_nxv1i16_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i16>, ptr %x
  %z = sext <vscale x 1 x i16> %y to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %z
}

define <vscale x 1 x i32> @zextload_nxv1i16_nxv1i32(ptr %x) {
; CHECK-LABEL: zextload_nxv1i16_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i16>, ptr %x
  %z = zext <vscale x 1 x i16> %y to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %z
}

define <vscale x 1 x i64> @sextload_nxv1i16_nxv1i64(ptr %x) {
; CHECK-LABEL: sextload_nxv1i16_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i16>, ptr %x
  %z = sext <vscale x 1 x i16> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define <vscale x 1 x i64> @zextload_nxv1i16_nxv1i64(ptr %x) {
; CHECK-LABEL: zextload_nxv1i16_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i16>, ptr %x
  %z = zext <vscale x 1 x i16> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}
define void @truncstore_nxv2i16_nxv2i8(<vscale x 2 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i16_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i16> %x to <vscale x 2 x i8>
  store <vscale x 2 x i8> %y, ptr %z
  ret void
}

define <vscale x 2 x i32> @sextload_nxv2i16_nxv2i32(ptr %x) {
; CHECK-LABEL: sextload_nxv2i16_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i16>, ptr %x
  %z = sext <vscale x 2 x i16> %y to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %z
}

define <vscale x 2 x i32> @zextload_nxv2i16_nxv2i32(ptr %x) {
; CHECK-LABEL: zextload_nxv2i16_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i16>, ptr %x
  %z = zext <vscale x 2 x i16> %y to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %z
}

define <vscale x 2 x i64> @sextload_nxv2i16_nxv2i64(ptr %x) {
; CHECK-LABEL: sextload_nxv2i16_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i16>, ptr %x
  %z = sext <vscale x 2 x i16> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define <vscale x 2 x i64> @zextload_nxv2i16_nxv2i64(ptr %x) {
; CHECK-LABEL: zextload_nxv2i16_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i16>, ptr %x
  %z = zext <vscale x 2 x i16> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define void @truncstore_nxv4i16_nxv4i8(<vscale x 4 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i16_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i16> %x to <vscale x 4 x i8>
  store <vscale x 4 x i8> %y, ptr %z
  ret void
}
define <vscale x 4 x i32> @sextload_nxv4i16_nxv4i32(ptr %x) {
; CHECK-LABEL: sextload_nxv4i16_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i16>, ptr %x
  %z = sext <vscale x 4 x i16> %y to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %z
}

define <vscale x 4 x i32> @zextload_nxv4i16_nxv4i32(ptr %x) {
; CHECK-LABEL: zextload_nxv4i16_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i16>, ptr %x
  %z = zext <vscale x 4 x i16> %y to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %z
}

define <vscale x 4 x i64> @sextload_nxv4i16_nxv4i64(ptr %x) {
; CHECK-LABEL: sextload_nxv4i16_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i16>, ptr %x
  %z = sext <vscale x 4 x i16> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define <vscale x 4 x i64> @zextload_nxv4i16_nxv4i64(ptr %x) {
; CHECK-LABEL: zextload_nxv4i16_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i16>, ptr %x
  %z = zext <vscale x 4 x i16> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define void @truncstore_nxv8i16_nxv8i8(<vscale x 8 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i16_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i16> %x to <vscale x 8 x i8>
  store <vscale x 8 x i8> %y, ptr %z
  ret void
}

define <vscale x 8 x i32> @sextload_nxv8i16_nxv8i32(ptr %x) {
; CHECK-LABEL: sextload_nxv8i16_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i16>, ptr %x
  %z = sext <vscale x 8 x i16> %y to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %z
}
define <vscale x 8 x i32> @zextload_nxv8i16_nxv8i32(ptr %x) {
; CHECK-LABEL: zextload_nxv8i16_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i16>, ptr %x
  %z = zext <vscale x 8 x i16> %y to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %z
}

define <vscale x 8 x i64> @sextload_nxv8i16_nxv8i64(ptr %x) {
; CHECK-LABEL: sextload_nxv8i16_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i16>, ptr %x
  %z = sext <vscale x 8 x i16> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

define <vscale x 8 x i64> @zextload_nxv8i16_nxv8i64(ptr %x) {
; CHECK-LABEL: zextload_nxv8i16_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vzext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i16>, ptr %x
  %z = zext <vscale x 8 x i16> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

define void @truncstore_nxv16i16_nxv16i8(<vscale x 16 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv16i16_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 16 x i16> %x to <vscale x 16 x i8>
  store <vscale x 16 x i8> %y, ptr %z
  ret void
}

define <vscale x 16 x i32> @sextload_nxv16i16_nxv16i32(ptr %x) {
; CHECK-LABEL: sextload_nxv16i16_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i16>, ptr %x
  %z = sext <vscale x 16 x i16> %y to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %z
}

define <vscale x 16 x i32> @zextload_nxv16i16_nxv16i32(ptr %x) {
; CHECK-LABEL: zextload_nxv16i16_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i16>, ptr %x
  %z = zext <vscale x 16 x i16> %y to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %z
}

define void @truncstore_nxv32i16_nxv32i8(<vscale x 32 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv32i16_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 32 x i16> %x to <vscale x 32 x i8>
  store <vscale x 32 x i8> %y, ptr %z
  ret void
}
define void @truncstore_nxv1i32_nxv1i8(<vscale x 1 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i32_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i8>
  store <vscale x 1 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv1i32_nxv1i16(<vscale x 1 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i32_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
  store <vscale x 1 x i16> %y, ptr %z
  ret void
}

define <vscale x 1 x i64> @sextload_nxv1i32_nxv1i64(ptr %x) {
; CHECK-LABEL: sextload_nxv1i32_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i32>, ptr %x
  %z = sext <vscale x 1 x i32> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define <vscale x 1 x i64> @zextload_nxv1i32_nxv1i64(ptr %x) {
; CHECK-LABEL: zextload_nxv1i32_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i32>, ptr %x
  %z = zext <vscale x 1 x i32> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define void @truncstore_nxv2i32_nxv2i8(<vscale x 2 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i32_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i8>
  store <vscale x 2 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv2i32_nxv2i16(<vscale x 2 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i32_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i16>
  store <vscale x 2 x i16> %y, ptr %z
  ret void
}

define <vscale x 2 x i64> @sextload_nxv2i32_nxv2i64(ptr %x) {
; CHECK-LABEL: sextload_nxv2i32_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i32>, ptr %x
  %z = sext <vscale x 2 x i32> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define <vscale x 2 x i64> @zextload_nxv2i32_nxv2i64(ptr %x) {
; CHECK-LABEL: zextload_nxv2i32_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i32>, ptr %x
  %z = zext <vscale x 2 x i32> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}
define void @truncstore_nxv4i32_nxv4i8(<vscale x 4 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i32_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i8>
  store <vscale x 4 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv4i32_nxv4i16(<vscale x 4 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i32_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i16>
  store <vscale x 4 x i16> %y, ptr %z
  ret void
}

define <vscale x 4 x i64> @sextload_nxv4i32_nxv4i64(ptr %x) {
; CHECK-LABEL: sextload_nxv4i32_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i32>, ptr %x
  %z = sext <vscale x 4 x i32> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define <vscale x 4 x i64> @zextload_nxv4i32_nxv4i64(ptr %x) {
; CHECK-LABEL: zextload_nxv4i32_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i32>, ptr %x
  %z = zext <vscale x 4 x i32> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define void @truncstore_nxv8i32_nxv8i8(<vscale x 8 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i32_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i8>
  store <vscale x 8 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv8i32_nxv8i16(<vscale x 8 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i32_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i16>
  store <vscale x 8 x i16> %y, ptr %z
  ret void
}

define <vscale x 8 x i64> @sextload_nxv8i32_nxv8i64(ptr %x) {
; CHECK-LABEL: sextload_nxv8i32_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i32>, ptr %x
  %z = sext <vscale x 8 x i32> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

define <vscale x 8 x i64> @zextload_nxv8i32_nxv8i64(ptr %x) {
; CHECK-LABEL: zextload_nxv8i32_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i32>, ptr %x
  %z = zext <vscale x 8 x i32> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}
define void @truncstore_nxv16i32_nxv16i8(<vscale x 16 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv16i32_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i8>
  store <vscale x 16 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv16i32_nxv16i16(<vscale x 16 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv16i32_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i16>
  store <vscale x 16 x i16> %y, ptr %z
  ret void
}

define void @truncstore_nxv1i64_nxv1i8(<vscale x 1 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i64_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i8>
  store <vscale x 1 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv1i64_nxv1i16(<vscale x 1 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i64_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i16>
  store <vscale x 1 x i16> %y, ptr %z
  ret void
}

define void @truncstore_nxv1i64_nxv1i32(<vscale x 1 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i64_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
  store <vscale x 1 x i32> %y, ptr %z
  ret void
}

define void @truncstore_nxv2i64_nxv2i8(<vscale x 2 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i64_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i8>
  store <vscale x 2 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv2i64_nxv2i16(<vscale x 2 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i64_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i16>
  store <vscale x 2 x i16> %y, ptr %z
  ret void
}

define void @truncstore_nxv2i64_nxv2i32(<vscale x 2 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i64_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
  store <vscale x 2 x i32> %y, ptr %z
  ret void
}
define void @truncstore_nxv4i64_nxv4i8(<vscale x 4 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i64_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i8>
  store <vscale x 4 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv4i64_nxv4i16(<vscale x 4 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i64_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i16>
  store <vscale x 4 x i16> %y, ptr %z
  ret void
}

define void @truncstore_nxv4i64_nxv4i32(<vscale x 4 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i64_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
  store <vscale x 4 x i32> %y, ptr %z
  ret void
}

define void @truncstore_nxv8i64_nxv8i8(<vscale x 8 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i64_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i8>
  store <vscale x 8 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv8i64_nxv8i16(<vscale x 8 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i64_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i16>
  store <vscale x 8 x i16> %y, ptr %z
  ret void
}

define void @truncstore_nxv8i64_nxv8i32(<vscale x 8 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i64_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
  store <vscale x 8 x i32> %y, ptr %z
  ret void
}
define <vscale x 1 x float> @extload_nxv1f16_nxv1f32(ptr %x) {
; CHECK-LABEL: extload_nxv1f16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x half>, ptr %x
  %z = fpext <vscale x 1 x half> %y to <vscale x 1 x float>
  ret <vscale x 1 x float> %z
}

define <vscale x 1 x double> @extload_nxv1f16_nxv1f64(ptr %x) {
; CHECK-LABEL: extload_nxv1f16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x half>, ptr %x
  %z = fpext <vscale x 1 x half> %y to <vscale x 1 x double>
  ret <vscale x 1 x double> %z
}

define <vscale x 2 x float> @extload_nxv2f16_nxv2f32(ptr %x) {
; CHECK-LABEL: extload_nxv2f16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x half>, ptr %x
  %z = fpext <vscale x 2 x half> %y to <vscale x 2 x float>
  ret <vscale x 2 x float> %z
}

define <vscale x 2 x double> @extload_nxv2f16_nxv2f64(ptr %x) {
; CHECK-LABEL: extload_nxv2f16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x half>, ptr %x
  %z = fpext <vscale x 2 x half> %y to <vscale x 2 x double>
  ret <vscale x 2 x double> %z
}

define <vscale x 4 x float> @extload_nxv4f16_nxv4f32(ptr %x) {
; CHECK-LABEL: extload_nxv4f16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x half>, ptr %x
  %z = fpext <vscale x 4 x half> %y to <vscale x 4 x float>
  ret <vscale x 4 x float> %z
}

define <vscale x 4 x double> @extload_nxv4f16_nxv4f64(ptr %x) {
; CHECK-LABEL: extload_nxv4f16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x half>, ptr %x
  %z = fpext <vscale x 4 x half> %y to <vscale x 4 x double>
  ret <vscale x 4 x double> %z
}

define <vscale x 8 x float> @extload_nxv8f16_nxv8f32(ptr %x) {
; CHECK-LABEL: extload_nxv8f16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x half>, ptr %x
  %z = fpext <vscale x 8 x half> %y to <vscale x 8 x float>
  ret <vscale x 8 x float> %z
}

define <vscale x 8 x double> @extload_nxv8f16_nxv8f64(ptr %x) {
; CHECK-LABEL: extload_nxv8f16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x half>, ptr %x
  %z = fpext <vscale x 8 x half> %y to <vscale x 8 x double>
  ret <vscale x 8 x double> %z
}

define <vscale x 16 x float> @extload_nxv16f16_nxv16f32(ptr %x) {
; CHECK-LABEL: extload_nxv16f16_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x half>, ptr %x
  %z = fpext <vscale x 16 x half> %y to <vscale x 16 x float>
  ret <vscale x 16 x float> %z
}
define void @truncstore_nxv1f32_nxv1f16(<vscale x 1 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1f32_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 1 x float> %x to <vscale x 1 x half>
  store <vscale x 1 x half> %y, ptr %z
  ret void
}

define <vscale x 1 x double> @extload_nxv1f32_nxv1f64(ptr %x) {
; CHECK-LABEL: extload_nxv1f32_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x float>, ptr %x
  %z = fpext <vscale x 1 x float> %y to <vscale x 1 x double>
  ret <vscale x 1 x double> %z
}

define void @truncstore_nxv2f32_nxv2f16(<vscale x 2 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2f32_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 2 x float> %x to <vscale x 2 x half>
  store <vscale x 2 x half> %y, ptr %z
  ret void
}

define <vscale x 2 x double> @extload_nxv2f32_nxv2f64(ptr %x) {
; CHECK-LABEL: extload_nxv2f32_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x float>, ptr %x
  %z = fpext <vscale x 2 x float> %y to <vscale x 2 x double>
  ret <vscale x 2 x double> %z
}

define void @truncstore_nxv4f32_nxv4f16(<vscale x 4 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4f32_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 4 x float> %x to <vscale x 4 x half>
  store <vscale x 4 x half> %y, ptr %z
  ret void
}

define <vscale x 4 x double> @extload_nxv4f32_nxv4f64(ptr %x) {
; CHECK-LABEL: extload_nxv4f32_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x float>, ptr %x
  %z = fpext <vscale x 4 x float> %y to <vscale x 4 x double>
  ret <vscale x 4 x double> %z
}

define void @truncstore_nxv8f32_nxv8f16(<vscale x 8 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8f32_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v12, v8
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 8 x float> %x to <vscale x 8 x half>
  store <vscale x 8 x half> %y, ptr %z
  ret void
}

define <vscale x 8 x double> @extload_nxv8f32_nxv8f64(ptr %x) {
; CHECK-LABEL: extload_nxv8f32_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x float>, ptr %x
  %z = fpext <vscale x 8 x float> %y to <vscale x 8 x double>
  ret <vscale x 8 x double> %z
}

define void @truncstore_nxv16f32_nxv16f16(<vscale x 16 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv16f32_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v16, v8
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 16 x float> %x to <vscale x 16 x half>
  store <vscale x 16 x half> %y, ptr %z
  ret void
}
define void @truncstore_nxv1f64_nxv1f16(<vscale x 1 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1f64_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 1 x double> %x to <vscale x 1 x half>
  store <vscale x 1 x half> %y, ptr %z
  ret void
}

define void @truncstore_nxv1f64_nxv1f32(<vscale x 1 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1f64_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 1 x double> %x to <vscale x 1 x float>
  store <vscale x 1 x float> %y, ptr %z
  ret void
}

define void @truncstore_nxv2f64_nxv2f16(<vscale x 2 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2f64_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v10
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x half>
  store <vscale x 2 x half> %y, ptr %z
  ret void
}

define void @truncstore_nxv2f64_nxv2f32(<vscale x 2 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2f64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
  store <vscale x 2 x float> %y, ptr %z
  ret void
}

define void @truncstore_nxv4f64_nxv4f16(<vscale x 4 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4f64_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v12
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x half>
  store <vscale x 4 x half> %y, ptr %z
  ret void
}

define void @truncstore_nxv4f64_nxv4f32(<vscale x 4 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4f64_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v12, v8
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x float>
  store <vscale x 4 x float> %y, ptr %z
  ret void
}

define void @truncstore_nxv8f64_nxv8f16(<vscale x 8 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8f64_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v16
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x half>
  store <vscale x 8 x half> %y, ptr %z
  ret void
}

define void @truncstore_nxv8f64_nxv8f32(<vscale x 8 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8f64_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v16, v8
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x float>
  store <vscale x 8 x float> %y, ptr %z
  ret void
}