; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s --check-prefix=RV64
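
; The expected sequences below implement (urem X, C) != K without a vector
; division: X (minus K for the eq1 tests) is multiplied by the multiplicative
; inverse of the odd part of C modulo 2^16, rotated right by one bit when C is
; even, and the result is compared against a threshold with vmsgtu.vx.

; X % 6 != 0: multiply by 0xaaab (inverse of 3 mod 2^16), rotate right by one,
; compare against 0x2aaa.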
define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq0(<vscale x 1 x i16> %x) nounwind {
; RV32-LABEL: test_urem_vec_even_divisor_eq0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, -1365
; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    vsll.vi v9, v8, 15
; RV32-NEXT:    vsrl.vi v8, v8, 1
; RV32-NEXT:    vor.vv v8, v8, v9
; RV32-NEXT:    lui a0, 3
; RV32-NEXT:    addi a0, a0, -1366
; RV32-NEXT:    vmsgtu.vx v0, v8, a0
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_vec_even_divisor_eq0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addi a0, a0, -1365
; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    vsll.vi v9, v8, 15
; RV64-NEXT:    vsrl.vi v8, v8, 1
; RV64-NEXT:    vor.vv v8, v8, v9
; RV64-NEXT:    lui a0, 3
; RV64-NEXT:    addi a0, a0, -1366
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
; RV64-NEXT:    ret
  %urem = urem <vscale x 1 x i16> %x, splat (i16 6)
  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 0)
  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %ext
}

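; X % 5 != 0: multiply by 0xcccd (inverse of 5 mod 2^16), compare against
; 0x3333.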
define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq0(<vscale x 1 x i16> %x) nounwind {
; RV32-LABEL: test_urem_vec_odd_divisor_eq0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048573
; RV32-NEXT:    addi a0, a0, -819
; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    lui a0, 3
; RV32-NEXT:    addi a0, a0, 819
; RV32-NEXT:    vmsgtu.vx v0, v8, a0
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_vec_odd_divisor_eq0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048573
; RV64-NEXT:    addi a0, a0, -819
; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    lui a0, 3
; RV64-NEXT:    addi a0, a0, 819
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
; RV64-NEXT:    ret
  %urem = urem <vscale x 1 x i16> %x, splat (i16 5)
  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 0)
  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %ext
}

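; X % 6 != 1: subtract 1 first, then the same multiply, rotate, and compare
; sequence as the X % 6 != 0 case.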
define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq1(<vscale x 1 x i16> %x) nounwind {
; RV32-LABEL: test_urem_vec_even_divisor_eq1:
; RV32:       # %bb.0:
; RV32-NEXT:    li a0, 1
; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV32-NEXT:    vsub.vx v8, v8, a0
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, -1365
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    vsll.vi v9, v8, 15
; RV32-NEXT:    vsrl.vi v8, v8, 1
; RV32-NEXT:    vor.vv v8, v8, v9
; RV32-NEXT:    lui a0, 3
; RV32-NEXT:    addi a0, a0, -1366
; RV32-NEXT:    vmsgtu.vx v0, v8, a0
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_vec_even_divisor_eq1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addi a0, a0, -1365
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    vsll.vi v9, v8, 15
; RV64-NEXT:    vsrl.vi v8, v8, 1
; RV64-NEXT:    vor.vv v8, v8, v9
; RV64-NEXT:    lui a0, 3
; RV64-NEXT:    addi a0, a0, -1366
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
; RV64-NEXT:    ret
  %urem = urem <vscale x 1 x i16> %x, splat (i16 6)
  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 1)
  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %ext
}

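; X % 5 != 1: subtract 1 first, then multiply by 0xcccd and compare against
; 0x3332.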
define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq1(<vscale x 1 x i16> %x) nounwind {
; RV32-LABEL: test_urem_vec_odd_divisor_eq1:
; RV32:       # %bb.0:
; RV32-NEXT:    li a0, 1
; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV32-NEXT:    vsub.vx v8, v8, a0
; RV32-NEXT:    lui a0, 1048573
; RV32-NEXT:    addi a0, a0, -819
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    lui a0, 3
; RV32-NEXT:    addi a0, a0, 818
; RV32-NEXT:    vmsgtu.vx v0, v8, a0
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: test_urem_vec_odd_divisor_eq1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    lui a0, 1048573
; RV64-NEXT:    addi a0, a0, -819
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    lui a0, 3
; RV64-NEXT:    addi a0, a0, 818
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
; RV64-NEXT:    ret
  %urem = urem <vscale x 1 x i16> %x, splat (i16 5)
  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 1)
  %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %ext
}