1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
3 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; Test that (add (sext (select cond, x, 0)), y) is combined into a single
; tail-undisturbed masked vwadd.wv: the select's zero false-operand is folded
; away and the vmslt compare result is used directly as the v0 mask.
5 define <vscale x 8 x i64> @vwadd_wv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
6 ; CHECK-LABEL: vwadd_wv_mask_v8i32:
8 ; CHECK-NEXT: li a0, 42
9 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
10 ; CHECK-NEXT: vmslt.vx v0, v8, a0
11 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu
12 ; CHECK-NEXT: vwadd.wv v16, v16, v8, v0.t
13 ; CHECK-NEXT: vmv8r.v v8, v16
15 %mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
16 %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
17 %sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
18 %ret = add <vscale x 8 x i64> %sa, %y
19 ret <vscale x 8 x i64> %ret
; Same combine as above but with zext instead of sext: the unsigned widening
; form vwaddu.wv is produced, again masked with the compare result in v0 and
; tail undisturbed so the unselected lanes keep the value of %y.
22 define <vscale x 8 x i64> @vwaddu_wv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
23 ; CHECK-LABEL: vwaddu_wv_mask_v8i32:
25 ; CHECK-NEXT: li a0, 42
26 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
27 ; CHECK-NEXT: vmslt.vx v0, v8, a0
28 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu
29 ; CHECK-NEXT: vwaddu.wv v16, v16, v8, v0.t
30 ; CHECK-NEXT: vmv8r.v v8, v16
32 %mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
33 %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
34 %sa = zext <vscale x 8 x i32> %a to <vscale x 8 x i64>
35 %ret = add <vscale x 8 x i64> %sa, %y
36 ret <vscale x 8 x i64> %ret
; Here both add operands are zero-extended from i32, so the vv form of the
; widening add is selected. The select is not folded into the add's mask:
; it is materialized with vmv.v.i/vmerge and vwaddu.vv executes unmasked.
39 define <vscale x 8 x i64> @vwaddu_vv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
40 ; CHECK-LABEL: vwaddu_vv_mask_v8i32:
42 ; CHECK-NEXT: li a0, 42
43 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
44 ; CHECK-NEXT: vmslt.vx v0, v8, a0
45 ; CHECK-NEXT: vmv.v.i v16, 0
46 ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
47 ; CHECK-NEXT: vwaddu.vv v16, v8, v12
48 ; CHECK-NEXT: vmv8r.v v8, v16
50 %mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
51 %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
52 %sa = zext <vscale x 8 x i32> %a to <vscale x 8 x i64>
53 %sy = zext <vscale x 8 x i32> %y to <vscale x 8 x i64>
54 %ret = add <vscale x 8 x i64> %sa, %sy
55 ret <vscale x 8 x i64> %ret
; Same pattern as vwadd_wv_mask_v8i32 but with the add operands commuted
; (%y + %sa instead of %sa + %y); the combine must still produce the masked,
; tail-undisturbed vwadd.wv since add is commutative.
58 define <vscale x 8 x i64> @vwadd_wv_mask_v8i32_commutative(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
59 ; CHECK-LABEL: vwadd_wv_mask_v8i32_commutative:
61 ; CHECK-NEXT: li a0, 42
62 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
63 ; CHECK-NEXT: vmslt.vx v0, v8, a0
64 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu
65 ; CHECK-NEXT: vwadd.wv v16, v16, v8, v0.t
66 ; CHECK-NEXT: vmv8r.v v8, v16
68 %mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
69 %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
70 %sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
71 %ret = add <vscale x 8 x i64> %y, %sa
72 ret <vscale x 8 x i64> %ret
; Negative test: the select's false operand is splat(1) rather than zero, so
; the select cannot be folded into the mask of the widening add; an explicit
; vmv.v.i/vmerge pair is emitted and vwadd.wv executes unmasked.
75 define <vscale x 8 x i64> @vwadd_wv_mask_v8i32_nonzero(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
76 ; CHECK-LABEL: vwadd_wv_mask_v8i32_nonzero:
78 ; CHECK-NEXT: li a0, 42
79 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
80 ; CHECK-NEXT: vmslt.vx v0, v8, a0
81 ; CHECK-NEXT: vmv.v.i v12, 1
82 ; CHECK-NEXT: vmerge.vvm v24, v12, v8, v0
83 ; CHECK-NEXT: vwadd.wv v8, v16, v24
85 %mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
86 %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> splat (i32 1)
87 %sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
88 %ret = add <vscale x 8 x i64> %sa, %y
89 ret <vscale x 8 x i64> %ret