1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
3 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <2 x i16> @vwsubu_v2i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vle8.v v10, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v9, v10
; CHECK-NEXT:    ret
  %a = load <2 x i8>, ptr %x
  %b = load <2 x i8>, ptr %y
  %c = zext <2 x i8> %a to <2 x i16>
  %d = zext <2 x i8> %b to <2 x i16>
  %e = sub <2 x i16> %c, %d
  ret <2 x i16> %e
}
define <4 x i16> @vwsubu_v4i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vle8.v v10, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v9, v10
; CHECK-NEXT:    ret
  %a = load <4 x i8>, ptr %x
  %b = load <4 x i8>, ptr %y
  %c = zext <4 x i8> %a to <4 x i16>
  %d = zext <4 x i8> %b to <4 x i16>
  %e = sub <4 x i16> %c, %d
  ret <4 x i16> %e
}
define <2 x i32> @vwsubu_v2i32(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vle16.v v10, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v9, v10
; CHECK-NEXT:    ret
  %a = load <2 x i16>, ptr %x
  %b = load <2 x i16>, ptr %y
  %c = zext <2 x i16> %a to <2 x i32>
  %d = zext <2 x i16> %b to <2 x i32>
  %e = sub <2 x i32> %c, %d
  ret <2 x i32> %e
}
define <8 x i16> @vwsubu_v8i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vle8.v v10, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v9, v10
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = load <8 x i8>, ptr %y
  %c = zext <8 x i8> %a to <8 x i16>
  %d = zext <8 x i8> %b to <8 x i16>
  %e = sub <8 x i16> %c, %d
  ret <8 x i16> %e
}
define <4 x i32> @vwsubu_v4i32(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vle16.v v10, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v9, v10
; CHECK-NEXT:    ret
  %a = load <4 x i16>, ptr %x
  %b = load <4 x i16>, ptr %y
  %c = zext <4 x i16> %a to <4 x i32>
  %d = zext <4 x i16> %b to <4 x i32>
  %e = sub <4 x i32> %c, %d
  ret <4 x i32> %e
}
define <2 x i64> @vwsubu_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vle32.v v10, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v9, v10
; CHECK-NEXT:    ret
  %a = load <2 x i32>, ptr %x
  %b = load <2 x i32>, ptr %y
  %c = zext <2 x i32> %a to <2 x i64>
  %d = zext <2 x i32> %b to <2 x i64>
  %e = sub <2 x i64> %c, %d
  ret <2 x i64> %e
}
define <16 x i16> @vwsubu_v16i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vle8.v v11, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = load <16 x i8>, ptr %y
  %c = zext <16 x i8> %a to <16 x i16>
  %d = zext <16 x i8> %b to <16 x i16>
  %e = sub <16 x i16> %c, %d
  ret <16 x i16> %e
}
define <8 x i32> @vwsubu_v8i32(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vle16.v v11, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = load <8 x i16>, ptr %x
  %b = load <8 x i16>, ptr %y
  %c = zext <8 x i16> %a to <8 x i32>
  %d = zext <8 x i16> %b to <8 x i32>
  %e = sub <8 x i32> %c, %d
  ret <8 x i32> %e
}
define <4 x i64> @vwsubu_v4i64(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v10, (a0)
; CHECK-NEXT:    vle32.v v11, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = load <4 x i32>, ptr %x
  %b = load <4 x i32>, ptr %y
  %c = zext <4 x i32> %a to <4 x i64>
  %d = zext <4 x i32> %b to <4 x i64>
  %e = sub <4 x i64> %c, %d
  ret <4 x i64> %e
}
define <32 x i16> @vwsubu_v32i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vle8.v v14, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v12, v14
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = load <32 x i8>, ptr %y
  %c = zext <32 x i8> %a to <32 x i16>
  %d = zext <32 x i8> %b to <32 x i16>
  %e = sub <32 x i16> %c, %d
  ret <32 x i16> %e
}
define <16 x i32> @vwsubu_v16i32(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v12, (a0)
; CHECK-NEXT:    vle16.v v14, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v12, v14
; CHECK-NEXT:    ret
  %a = load <16 x i16>, ptr %x
  %b = load <16 x i16>, ptr %y
  %c = zext <16 x i16> %a to <16 x i32>
  %d = zext <16 x i16> %b to <16 x i32>
  %e = sub <16 x i32> %c, %d
  ret <16 x i32> %e
}
define <8 x i64> @vwsubu_v8i64(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v12, (a0)
; CHECK-NEXT:    vle32.v v14, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v12, v14
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %b = load <8 x i32>, ptr %y
  %c = zext <8 x i32> %a to <8 x i64>
  %d = zext <8 x i32> %b to <8 x i64>
  %e = sub <8 x i64> %c, %d
  ret <8 x i64> %e
}
define <64 x i16> @vwsubu_v64i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vle8.v v20, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v16, v20
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = load <64 x i8>, ptr %y
  %c = zext <64 x i8> %a to <64 x i16>
  %d = zext <64 x i8> %b to <64 x i16>
  %e = sub <64 x i16> %c, %d
  ret <64 x i16> %e
}
define <32 x i32> @vwsubu_v32i32(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vle16.v v20, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v16, v20
; CHECK-NEXT:    ret
  %a = load <32 x i16>, ptr %x
  %b = load <32 x i16>, ptr %y
  %c = zext <32 x i16> %a to <32 x i32>
  %d = zext <32 x i16> %b to <32 x i32>
  %e = sub <32 x i32> %c, %d
  ret <32 x i32> %e
}
define <16 x i64> @vwsubu_v16i64(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vle32.v v20, (a1)
; CHECK-NEXT:    vwsubu.vv v8, v16, v20
; CHECK-NEXT:    ret
  %a = load <16 x i32>, ptr %x
  %b = load <16 x i32>, ptr %y
  %c = zext <16 x i32> %a to <16 x i64>
  %d = zext <16 x i32> %b to <16 x i64>
  %e = sub <16 x i64> %c, %d
  ret <16 x i64> %e
}
define <128 x i16> @vwsubu_v128i16(ptr %x, ptr %y) nounwind {
; CHECK-LABEL: vwsubu_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 4
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    li a2, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vle8.v v0, (a1)
; CHECK-NEXT:    li a0, 64
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v16, v8, a0
; CHECK-NEXT:    vslidedown.vx v8, v0, a0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwsubu.vv v24, v16, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vwsubu.vv v8, v16, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = load <128 x i8>, ptr %y
  %c = zext <128 x i8> %a to <128 x i16>
  %d = zext <128 x i8> %b to <128 x i16>
  %e = sub <128 x i16> %c, %d
  ret <128 x i16> %e
}
define <64 x i32> @vwsubu_v64i32(ptr %x, ptr %y) nounwind {
; CHECK-LABEL: vwsubu_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 4
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vle16.v v0, (a1)
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v16, v8, a0
; CHECK-NEXT:    vslidedown.vx v8, v0, a0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vwsubu.vv v24, v16, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vwsubu.vv v8, v16, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %a = load <64 x i16>, ptr %x
  %b = load <64 x i16>, ptr %y
  %c = zext <64 x i16> %a to <64 x i32>
  %d = zext <64 x i16> %b to <64 x i32>
  %e = sub <64 x i32> %c, %d
  ret <64 x i32> %e
}
define <32 x i64> @vwsubu_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-LABEL: vwsubu_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 4
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vle32.v v0, (a1)
; CHECK-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT:    vslidedown.vi v16, v8, 16
; CHECK-NEXT:    vslidedown.vi v8, v0, 16
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vwsubu.vv v24, v16, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vwsubu.vv v8, v16, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %a = load <32 x i32>, ptr %x
  %b = load <32 x i32>, ptr %y
  %c = zext <32 x i32> %a to <32 x i64>
  %d = zext <32 x i32> %b to <32 x i64>
  %e = sub <32 x i64> %c, %d
  ret <32 x i64> %e
}
define <2 x i32> @vwsubu_v2i32_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i32_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vwsubu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %a = load <2 x i8>, ptr %x
  %b = load <2 x i8>, ptr %y
  %c = zext <2 x i8> %a to <2 x i32>
  %d = zext <2 x i8> %b to <2 x i32>
  %e = sub <2 x i32> %c, %d
  ret <2 x i32> %e
}
define <4 x i32> @vwsubu_v4i32_v4i8_v4i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v4i32_v4i8_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle16.v v9, (a1)
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vwsubu.vv v8, v10, v9
; CHECK-NEXT:    ret
  %a = load <4 x i8>, ptr %x
  %b = load <4 x i16>, ptr %y
  %c = zext <4 x i8> %a to <4 x i32>
  %d = zext <4 x i16> %b to <4 x i32>
  %e = sub <4 x i32> %c, %d
  ret <4 x i32> %e
}
define <4 x i64> @vwsubu_v4i64_v4i32_v4i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v4i64_v4i32_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle32.v v10, (a0)
; CHECK-NEXT:    vzext.vf4 v11, v8
; CHECK-NEXT:    vwsubu.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = load <4 x i32>, ptr %x
  %b = load <4 x i8>, ptr %y
  %c = zext <4 x i32> %a to <4 x i64>
  %d = zext <4 x i8> %b to <4 x i64>
  %e = sub <4 x i64> %c, %d
  ret <4 x i64> %e
}
define <2 x i16> @vwsubu_vx_v2i16(ptr %x, i8 %y) {
; CHECK-LABEL: vwsubu_vx_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v9, a1
; CHECK-NEXT:    ret
  %a = load <2 x i8>, ptr %x
  %b = insertelement <2 x i8> poison, i8 %y, i32 0
  %c = shufflevector <2 x i8> %b, <2 x i8> poison, <2 x i32> zeroinitializer
  %d = zext <2 x i8> %a to <2 x i16>
  %e = zext <2 x i8> %c to <2 x i16>
  %f = sub <2 x i16> %d, %e
  ret <2 x i16> %f
}
define <4 x i16> @vwsubu_vx_v4i16(ptr %x, i8 %y) {
; CHECK-LABEL: vwsubu_vx_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v9, a1
; CHECK-NEXT:    ret
  %a = load <4 x i8>, ptr %x
  %b = insertelement <4 x i8> poison, i8 %y, i32 0
  %c = shufflevector <4 x i8> %b, <4 x i8> poison, <4 x i32> zeroinitializer
  %d = zext <4 x i8> %a to <4 x i16>
  %e = zext <4 x i8> %c to <4 x i16>
  %f = sub <4 x i16> %d, %e
  ret <4 x i16> %f
}
define <2 x i32> @vwsubu_vx_v2i32(ptr %x, i16 %y) {
; CHECK-LABEL: vwsubu_vx_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v9, a1
; CHECK-NEXT:    ret
  %a = load <2 x i16>, ptr %x
  %b = insertelement <2 x i16> poison, i16 %y, i32 0
  %c = shufflevector <2 x i16> %b, <2 x i16> poison, <2 x i32> zeroinitializer
  %d = zext <2 x i16> %a to <2 x i32>
  %e = zext <2 x i16> %c to <2 x i32>
  %f = sub <2 x i32> %d, %e
  ret <2 x i32> %f
}
define <8 x i16> @vwsubu_vx_v8i16(ptr %x, i8 %y) {
; CHECK-LABEL: vwsubu_vx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v9, a1
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = zext <8 x i8> %a to <8 x i16>
  %e = zext <8 x i8> %c to <8 x i16>
  %f = sub <8 x i16> %d, %e
  ret <8 x i16> %f
}
define <4 x i32> @vwsubu_vx_v4i32(ptr %x, i16 %y) {
; CHECK-LABEL: vwsubu_vx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v9, a1
; CHECK-NEXT:    ret
  %a = load <4 x i16>, ptr %x
  %b = insertelement <4 x i16> poison, i16 %y, i32 0
  %c = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> zeroinitializer
  %d = zext <4 x i16> %a to <4 x i32>
  %e = zext <4 x i16> %c to <4 x i32>
  %f = sub <4 x i32> %d, %e
  ret <4 x i32> %f
}
define <2 x i64> @vwsubu_vx_v2i64(ptr %x, i32 %y) {
; CHECK-LABEL: vwsubu_vx_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v9, a1
; CHECK-NEXT:    ret
  %a = load <2 x i32>, ptr %x
  %b = insertelement <2 x i32> poison, i32 %y, i64 0
  %c = shufflevector <2 x i32> %b, <2 x i32> poison, <2 x i32> zeroinitializer
  %d = zext <2 x i32> %a to <2 x i64>
  %e = zext <2 x i32> %c to <2 x i64>
  %f = sub <2 x i64> %d, %e
  ret <2 x i64> %f
}
define <16 x i16> @vwsubu_vx_v16i16(ptr %x, i8 %y) {
; CHECK-LABEL: vwsubu_vx_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v10, a1
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = zext <16 x i8> %a to <16 x i16>
  %e = zext <16 x i8> %c to <16 x i16>
  %f = sub <16 x i16> %d, %e
  ret <16 x i16> %f
}
define <8 x i32> @vwsubu_vx_v8i32(ptr %x, i16 %y) {
; CHECK-LABEL: vwsubu_vx_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v10, a1
; CHECK-NEXT:    ret
  %a = load <8 x i16>, ptr %x
  %b = insertelement <8 x i16> poison, i16 %y, i32 0
  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
  %d = zext <8 x i16> %a to <8 x i32>
  %e = zext <8 x i16> %c to <8 x i32>
  %f = sub <8 x i32> %d, %e
  ret <8 x i32> %f
}
define <4 x i64> @vwsubu_vx_v4i64(ptr %x, i32 %y) {
; CHECK-LABEL: vwsubu_vx_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v10, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v10, a1
; CHECK-NEXT:    ret
  %a = load <4 x i32>, ptr %x
  %b = insertelement <4 x i32> poison, i32 %y, i64 0
  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
  %d = zext <4 x i32> %a to <4 x i64>
  %e = zext <4 x i32> %c to <4 x i64>
  %f = sub <4 x i64> %d, %e
  ret <4 x i64> %f
}
define <32 x i16> @vwsubu_vx_v32i16(ptr %x, i8 %y) {
; CHECK-LABEL: vwsubu_vx_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v12, a1
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = zext <32 x i8> %a to <32 x i16>
  %e = zext <32 x i8> %c to <32 x i16>
  %f = sub <32 x i16> %d, %e
  ret <32 x i16> %f
}
define <16 x i32> @vwsubu_vx_v16i32(ptr %x, i16 %y) {
; CHECK-LABEL: vwsubu_vx_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v12, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v12, a1
; CHECK-NEXT:    ret
  %a = load <16 x i16>, ptr %x
  %b = insertelement <16 x i16> poison, i16 %y, i32 0
  %c = shufflevector <16 x i16> %b, <16 x i16> poison, <16 x i32> zeroinitializer
  %d = zext <16 x i16> %a to <16 x i32>
  %e = zext <16 x i16> %c to <16 x i32>
  %f = sub <16 x i32> %d, %e
  ret <16 x i32> %f
}
define <8 x i64> @vwsubu_vx_v8i64(ptr %x, i32 %y) {
; CHECK-LABEL: vwsubu_vx_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v12, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v12, a1
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %b = insertelement <8 x i32> poison, i32 %y, i64 0
  %c = shufflevector <8 x i32> %b, <8 x i32> poison, <8 x i32> zeroinitializer
  %d = zext <8 x i32> %a to <8 x i64>
  %e = zext <8 x i32> %c to <8 x i64>
  %f = sub <8 x i64> %d, %e
  ret <8 x i64> %f
}
define <64 x i16> @vwsubu_vx_v64i16(ptr %x, i8 %y) {
; CHECK-LABEL: vwsubu_vx_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v16, a1
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = zext <64 x i8> %a to <64 x i16>
  %e = zext <64 x i8> %c to <64 x i16>
  %f = sub <64 x i16> %d, %e
  ret <64 x i16> %f
}
define <32 x i32> @vwsubu_vx_v32i32(ptr %x, i16 %y) {
; CHECK-LABEL: vwsubu_vx_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v16, a1
; CHECK-NEXT:    ret
  %a = load <32 x i16>, ptr %x
  %b = insertelement <32 x i16> poison, i16 %y, i32 0
  %c = shufflevector <32 x i16> %b, <32 x i16> poison, <32 x i32> zeroinitializer
  %d = zext <32 x i16> %a to <32 x i32>
  %e = zext <32 x i16> %c to <32 x i32>
  %f = sub <32 x i32> %d, %e
  ret <32 x i32> %f
}
define <16 x i64> @vwsubu_vx_v16i64(ptr %x, i32 %y) {
; CHECK-LABEL: vwsubu_vx_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vwsubu.vx v8, v16, a1
; CHECK-NEXT:    ret
  %a = load <16 x i32>, ptr %x
  %b = insertelement <16 x i32> poison, i32 %y, i64 0
  %c = shufflevector <16 x i32> %b, <16 x i32> poison, <16 x i32> zeroinitializer
  %d = zext <16 x i32> %a to <16 x i64>
  %e = zext <16 x i32> %c to <16 x i64>
  %f = sub <16 x i64> %d, %e
  ret <16 x i64> %f
}
define <8 x i16> @vwsubu_vx_v8i16_i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_vx_v8i16_i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vlse8.v v10, (a1), zero
; CHECK-NEXT:    vwsubu.vv v8, v10, v9
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = load i8, ptr %y
  %c = zext i8 %b to i16
  %d = insertelement <8 x i16> poison, i16 %c, i32 0
  %e = shufflevector <8 x i16> %d, <8 x i16> poison, <8 x i32> zeroinitializer
  %f = zext <8 x i8> %a to <8 x i16>
  %g = sub <8 x i16> %e, %f
  ret <8 x i16> %g
}
define <8 x i16> @vwsubu_vx_v8i16_i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_vx_v8i16_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vlse16.v v8, (a1), zero
; CHECK-NEXT:    vwsubu.wv v8, v8, v9
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = load i16, ptr %y
  %d = insertelement <8 x i16> poison, i16 %b, i32 0
  %e = shufflevector <8 x i16> %d, <8 x i16> poison, <8 x i32> zeroinitializer
  %f = zext <8 x i8> %a to <8 x i16>
  %g = sub <8 x i16> %e, %f
  ret <8 x i16> %g
}
define <4 x i32> @vwsubu_vx_v4i32_i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_vx_v4i32_i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lbu a1, 0(a1)
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vmv.v.x v10, a1
; CHECK-NEXT:    vwsubu.vv v8, v10, v9
; CHECK-NEXT:    ret
  %a = load <4 x i16>, ptr %x
  %b = load i8, ptr %y
  %c = zext i8 %b to i32
  %d = insertelement <4 x i32> poison, i32 %c, i32 0
  %e = shufflevector <4 x i32> %d, <4 x i32> poison, <4 x i32> zeroinitializer
  %f = zext <4 x i16> %a to <4 x i32>
  %g = sub <4 x i32> %e, %f
  ret <4 x i32> %g
}
define <4 x i32> @vwsubu_vx_v4i32_i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_vx_v4i32_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vlse16.v v10, (a1), zero
; CHECK-NEXT:    vwsubu.vv v8, v10, v9
; CHECK-NEXT:    ret
  %a = load <4 x i16>, ptr %x
  %b = load i16, ptr %y
  %c = zext i16 %b to i32
  %d = insertelement <4 x i32> poison, i32 %c, i32 0
  %e = shufflevector <4 x i32> %d, <4 x i32> poison, <4 x i32> zeroinitializer
  %f = zext <4 x i16> %a to <4 x i32>
  %g = sub <4 x i32> %e, %f
  ret <4 x i32> %g
}
define <4 x i32> @vwsubu_vx_v4i32_i32(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_vx_v4i32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vlse32.v v8, (a1), zero
; CHECK-NEXT:    vwsubu.wv v8, v8, v9
; CHECK-NEXT:    ret
  %a = load <4 x i16>, ptr %x
  %b = load i32, ptr %y
  %d = insertelement <4 x i32> poison, i32 %b, i32 0
  %e = shufflevector <4 x i32> %d, <4 x i32> poison, <4 x i32> zeroinitializer
  %f = zext <4 x i16> %a to <4 x i32>
  %g = sub <4 x i32> %e, %f
  ret <4 x i32> %g
}
define <2 x i64> @vwsubu_vx_v2i64_i8(ptr %x, ptr %y) nounwind {
; RV32-LABEL: vwsubu_vx_v2i64_i8:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    lbu a1, 0(a1)
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vle32.v v9, (a0)
; RV32-NEXT:    sw zero, 12(sp)
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    vwsubu.wv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vwsubu_vx_v2i64_i8:
; RV64:       # %bb.0:
; RV64-NEXT:    lbu a1, 0(a1)
; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT:    vle32.v v9, (a0)
; RV64-NEXT:    vmv.v.x v10, a1
; RV64-NEXT:    vwsubu.vv v8, v10, v9
; RV64-NEXT:    ret
  %a = load <2 x i32>, ptr %x
  %b = load i8, ptr %y
  %c = zext i8 %b to i64
  %d = insertelement <2 x i64> poison, i64 %c, i64 0
  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
  %f = zext <2 x i32> %a to <2 x i64>
  %g = sub <2 x i64> %e, %f
  ret <2 x i64> %g
}
define <2 x i64> @vwsubu_vx_v2i64_i16(ptr %x, ptr %y) nounwind {
; RV32-LABEL: vwsubu_vx_v2i64_i16:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    lhu a1, 0(a1)
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vle32.v v9, (a0)
; RV32-NEXT:    sw zero, 12(sp)
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    vwsubu.wv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vwsubu_vx_v2i64_i16:
; RV64:       # %bb.0:
; RV64-NEXT:    lhu a1, 0(a1)
; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT:    vle32.v v9, (a0)
; RV64-NEXT:    vmv.v.x v10, a1
; RV64-NEXT:    vwsubu.vv v8, v10, v9
; RV64-NEXT:    ret
  %a = load <2 x i32>, ptr %x
  %b = load i16, ptr %y
  %c = zext i16 %b to i64
  %d = insertelement <2 x i64> poison, i64 %c, i64 0
  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
  %f = zext <2 x i32> %a to <2 x i64>
  %g = sub <2 x i64> %e, %f
  ret <2 x i64> %g
}
define <2 x i64> @vwsubu_vx_v2i64_i32(ptr %x, ptr %y) nounwind {
; RV32-LABEL: vwsubu_vx_v2i64_i32:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    lw a1, 0(a1)
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vle32.v v9, (a0)
; RV32-NEXT:    sw zero, 12(sp)
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    vwsubu.wv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vwsubu_vx_v2i64_i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT:    vle32.v v9, (a0)
; RV64-NEXT:    vlse32.v v10, (a1), zero
; RV64-NEXT:    vwsubu.vv v8, v10, v9
; RV64-NEXT:    ret
  %a = load <2 x i32>, ptr %x
  %b = load i32, ptr %y
  %c = zext i32 %b to i64
  %d = insertelement <2 x i64> poison, i64 %c, i64 0
  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
  %f = zext <2 x i32> %a to <2 x i64>
  %g = sub <2 x i64> %e, %f
  ret <2 x i64> %g
}
define <2 x i64> @vwsubu_vx_v2i64_i64(ptr %x, ptr %y) nounwind {
; RV32-LABEL: vwsubu_vx_v2i64_i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    lw a2, 4(a1)
; RV32-NEXT:    lw a1, 0(a1)
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vle32.v v9, (a0)
; RV32-NEXT:    sw a2, 12(sp)
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    vwsubu.wv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vwsubu_vx_v2i64_i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT:    vle32.v v9, (a0)
; RV64-NEXT:    vlse64.v v8, (a1), zero
; RV64-NEXT:    vwsubu.wv v8, v8, v9
; RV64-NEXT:    ret
  %a = load <2 x i32>, ptr %x
  %b = load i64, ptr %y
  %d = insertelement <2 x i64> poison, i64 %b, i64 0
  %e = shufflevector <2 x i64> %d, <2 x i64> poison, <2 x i32> zeroinitializer
  %f = zext <2 x i32> %a to <2 x i64>
  %g = sub <2 x i64> %e, %f
  ret <2 x i64> %g
}
define <2 x i32> @vwsubu_v2i32_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i32_of_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vwsubu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %a = load <2 x i8>, ptr %x
  %b = load <2 x i8>, ptr %y
  %c = zext <2 x i8> %a to <2 x i32>
  %d = zext <2 x i8> %b to <2 x i32>
  %e = sub <2 x i32> %c, %d
  ret <2 x i32> %e
}
define <2 x i64> @vwsubu_v2i64_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i64_of_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vwsubu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vsext.vf4 v8, v10
; CHECK-NEXT:    ret
  %a = load <2 x i8>, ptr %x
  %b = load <2 x i8>, ptr %y
  %c = zext <2 x i8> %a to <2 x i64>
  %d = zext <2 x i8> %b to <2 x i64>
  %e = sub <2 x i64> %c, %d
  ret <2 x i64> %e
}
define <2 x i64> @vwsubu_v2i64_of_v2i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i64_of_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vle16.v v9, (a1)
; CHECK-NEXT:    vwsubu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %a = load <2 x i16>, ptr %x
  %b = load <2 x i16>, ptr %y
  %c = zext <2 x i16> %a to <2 x i64>
  %d = zext <2 x i16> %b to <2 x i64>
  %e = sub <2 x i64> %c, %d